Port renaming changes from AOMedia
Cherry-picked the following commits:
0defd8f Changed "WebM" to "AOMedia" & "webm" to "aomedia"
54e6676 Replace "VPx" by "AVx"
5082a36 Change "Vpx" to "Avx"
7df44f1 Replace "Vp9" w/ "Av1"
967f722 Remove kVp9CodecId
828f30c Change "Vp8" to "AOM"
030b5ff AUTHORS regenerated
2524cae Add ref-mv experimental flag
016762b Change copyright notice to AOMedia form
81e5526 Replace vp9 w/ av1
9b94565 Add missing files
fa8ca9f Change "vp9" to "av1"
ec838b7 Convert "vp8" to "aom"
80edfa0 Change "VP9" to "AV1"
d1a11fb Change "vp8" to "aom"
7b58251 Point to WebM test data
dd1a5c8 Replace "VP8" with "AOM"
ff00fc0 Change "VPX" to "AOM"
01dee0b Change "vp10" to "av1" in source code
cebe6f0 Convert "vpx" to "aom"
17b0567 rename vp10*.mk to av1_*.mk
fe5f8a8 rename files vp10_* to av1_*
Change-Id: I6fc3d18eb11fc171e46140c836ad5339cf6c9419
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
new file mode 100644
index 0000000..be89263
--- /dev/null
+++ b/av1/av1_common.mk
@@ -0,0 +1,139 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+AV1_COMMON_SRCS-yes += av1_common.mk
+AV1_COMMON_SRCS-yes += av1_iface_common.h
+AV1_COMMON_SRCS-yes += common/ans.h
+AV1_COMMON_SRCS-yes += common/alloccommon.c
+AV1_COMMON_SRCS-yes += common/blockd.c
+AV1_COMMON_SRCS-yes += common/debugmodes.c
+AV1_COMMON_SRCS-yes += common/divide.h
+AV1_COMMON_SRCS-yes += common/entropy.c
+AV1_COMMON_SRCS-yes += common/entropymode.c
+AV1_COMMON_SRCS-yes += common/entropymv.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.h
+AV1_COMMON_SRCS-yes += common/alloccommon.h
+AV1_COMMON_SRCS-yes += common/blockd.h
+AV1_COMMON_SRCS-yes += common/common.h
+AV1_COMMON_SRCS-yes += common/entropy.h
+AV1_COMMON_SRCS-yes += common/entropymode.h
+AV1_COMMON_SRCS-yes += common/entropymv.h
+AV1_COMMON_SRCS-yes += common/enums.h
+AV1_COMMON_SRCS-yes += common/filter.h
+AV1_COMMON_SRCS-yes += common/filter.c
+AV1_COMMON_SRCS-yes += common/idct.h
+AV1_COMMON_SRCS-yes += common/idct.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.c
+AV1_COMMON_SRCS-yes += common/loopfilter.h
+AV1_COMMON_SRCS-yes += common/thread_common.h
+AV1_COMMON_SRCS-yes += common/mv.h
+AV1_COMMON_SRCS-yes += common/onyxc_int.h
+AV1_COMMON_SRCS-yes += common/pred_common.h
+AV1_COMMON_SRCS-yes += common/pred_common.c
+AV1_COMMON_SRCS-yes += common/quant_common.h
+AV1_COMMON_SRCS-yes += common/reconinter.h
+AV1_COMMON_SRCS-yes += common/reconintra.h
+AV1_COMMON_SRCS-yes += common/av1_rtcd.c
+AV1_COMMON_SRCS-yes += common/av1_rtcd_defs.pl
+AV1_COMMON_SRCS-yes += common/scale.h
+AV1_COMMON_SRCS-yes += common/scale.c
+AV1_COMMON_SRCS-yes += common/seg_common.h
+AV1_COMMON_SRCS-yes += common/seg_common.c
+AV1_COMMON_SRCS-yes += common/tile_common.h
+AV1_COMMON_SRCS-yes += common/tile_common.c
+AV1_COMMON_SRCS-yes += common/loopfilter.c
+AV1_COMMON_SRCS-yes += common/thread_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.h
+AV1_COMMON_SRCS-yes += common/quant_common.c
+AV1_COMMON_SRCS-yes += common/reconinter.c
+AV1_COMMON_SRCS-yes += common/reconintra.c
+AV1_COMMON_SRCS-yes += common/restoration.h
+AV1_COMMON_SRCS-yes += common/common_data.h
+AV1_COMMON_SRCS-yes += common/scan.c
+AV1_COMMON_SRCS-yes += common/scan.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.c
+AV1_COMMON_SRCS-yes += common/av1_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm1d.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm1d.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm1d.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm1d.c
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm2d.c
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm2d_cfg.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm2d.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm2d_cfg.h
+AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/av1_convolve_ssse3.c
+AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/av1_convolve_filters_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_highbd_convolve_sse4.c
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_highbd_convolve_filters_sse4.c
+endif
+AV1_COMMON_SRCS-yes += common/av1_convolve.c
+AV1_COMMON_SRCS-yes += common/av1_convolve.h
+AV1_COMMON_SRCS-$(CONFIG_ANS) += common/ans.h
+AV1_COMMON_SRCS-$(CONFIG_ANS) += common/divide.h
+AV1_COMMON_SRCS-$(CONFIG_ANS) += common/divide.c
+AV1_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.h
+AV1_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.c
+ifeq (yes,$(filter yes,$(CONFIG_GLOBAL_MOTION) $(CONFIG_WARPED_MOTION)))
+AV1_COMMON_SRCS-yes += common/warped_motion.h
+AV1_COMMON_SRCS-yes += common/warped_motion.c
+endif
+AV1_COMMON_SRCS-yes += common/clpf.c
+AV1_COMMON_SRCS-yes += common/clpf.h
+ifeq ($(CONFIG_DERING),yes)
+AV1_COMMON_SRCS-yes += common/od_dering.c
+AV1_COMMON_SRCS-yes += common/od_dering.h
+AV1_COMMON_SRCS-yes += common/dering.c
+AV1_COMMON_SRCS-yes += common/dering.h
+endif
+AV1_COMMON_SRCS-yes += common/odintrin.c
+AV1_COMMON_SRCS-yes += common/odintrin.h
+
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans4_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans8_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans16_dspr2.c
+endif
+
+# common (msa)
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
+
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_txfm1d_sse4.h
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_fwd_txfm1d_sse4.c
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_fwd_txfm2d_sse4.c
+
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_txfm_utility_sse4.h
+endif
+
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
+endif
+
+ifeq ($(CONFIG_EXT_INTRA),yes)
+AV1_COMMON_SRCS-yes += common/intra_filters.h
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/reconintra_sse4.c
+endif
+
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h
+
+$(eval $(call rtcd_h_template,av1_rtcd,av1/common/av1_rtcd_defs.pl))
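
The rtcd_h_template eval above generates av1_rtcd.h from av1_rtcd_defs.pl: one function pointer per optimizable kernel, bound at startup to the best variant the running CPU supports. A minimal C sketch of that run-time dispatch pattern (illustrative only; my_idct4x4*, has_sse2 and my_rtcd are hypothetical names, and the real generated header is considerably larger):

#include <stdint.h>

/* Portable C kernel plus a stand-in for an SSE2 variant; both are plain C
 * here so the sketch compiles anywhere. */
static void my_idct4x4_c(const int16_t *in, uint8_t *out, int stride) {
  (void)in; (void)out; (void)stride; /* transform body omitted */
}
static void my_idct4x4_sse2(const int16_t *in, uint8_t *out, int stride) {
  (void)in; (void)out; (void)stride; /* SIMD body omitted */
}

/* Placeholder for a real CPUID probe. */
static int has_sse2(void) { return 0; }

/* The generated header declares one pointer per kernel... */
static void (*my_idct4x4)(const int16_t *, uint8_t *, int);

/* ...and a setup routine that binds each pointer to the best variant. */
static void my_rtcd(void) {
  my_idct4x4 = my_idct4x4_c;
  if (has_sse2()) my_idct4x4 = my_idct4x4_sse2;
}

int main(void) {
  int16_t in[16] = { 0 };
  uint8_t out[16] = { 0 };
  my_rtcd();
  my_idct4x4(in, out, 4); /* callers never know which ISA was chosen */
  return 0;
}

The HAVE_SSE2/HAVE_NEON/HAVE_MSA guards in the makefile above only decide whether the corresponding translation units are built at all; selection among the built variants happens at run time through pointers like this.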
diff --git a/av1/vp10_cx_iface.c b/av1/av1_cx_iface.c
similarity index 61%
rename from av1/vp10_cx_iface.c
rename to av1/av1_cx_iface.c
index 34dd428..b5223e7 100644
--- a/av1/vp10_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -11,18 +11,18 @@
#include <stdlib.h>
#include <string.h>
-#include "./vpx_config.h"
-#include "aom/vpx_encoder.h"
-#include "aom_ports/vpx_once.h"
+#include "./aom_config.h"
+#include "aom/aom_encoder.h"
+#include "aom_ports/aom_once.h"
#include "aom_ports/system_state.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "./vpx_version.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "./aom_version.h"
#include "av1/encoder/encoder.h"
-#include "aom/vp8cx.h"
+#include "aom/aomcx.h"
#include "av1/encoder/firstpass.h"
-#include "av1/vp10_iface_common.h"
+#include "av1/av1_iface_common.h"
-struct vp10_extracfg {
+struct av1_extracfg {
int cpu_used; // available cpu percentage in 1/16
unsigned int enable_auto_alt_ref;
#if CONFIG_EXT_REFS
@@ -37,7 +37,7 @@
unsigned int arnr_strength;
unsigned int min_gf_interval;
unsigned int max_gf_interval;
- vpx_tune_metric tuning;
+ aom_tune_metric tuning;
unsigned int cq_level; // constrained quality level
unsigned int rc_max_intra_bitrate_pct;
unsigned int rc_max_inter_bitrate_pct;
@@ -51,16 +51,16 @@
unsigned int frame_parallel_decoding_mode;
AQ_MODE aq_mode;
unsigned int frame_periodic_boost;
- vpx_bit_depth_t bit_depth;
- vpx_tune_content content;
- vpx_color_space_t color_space;
+ aom_bit_depth_t bit_depth;
+ aom_tune_content content;
+ aom_color_space_t color_space;
int color_range;
int render_width;
int render_height;
- vpx_superblock_size_t superblock_size;
+ aom_superblock_size_t superblock_size;
};
-static struct vp10_extracfg default_extra_cfg = {
+static struct av1_extracfg default_extra_cfg = {
0, // cpu_used
1, // enable_auto_alt_ref
#if CONFIG_EXT_REFS
@@ -80,7 +80,7 @@
5, // arnr_strength
0, // min_gf_interval; 0 -> default decision
0, // max_gf_interval; 0 -> default decision
- VPX_TUNE_PSNR, // tuning
+ AOM_TUNE_PSNR, // tuning
10, // cq_level
0, // rc_max_intra_bitrate_pct
0, // rc_max_inter_bitrate_pct
@@ -94,41 +94,41 @@
1, // frame_parallel_decoding_mode
NO_AQ, // aq_mode
0, // frame_periodic_boost
- VPX_BITS_8, // Bit depth
- VPX_CONTENT_DEFAULT, // content
- VPX_CS_UNKNOWN, // color space
+ AOM_BITS_8, // Bit depth
+ AOM_CONTENT_DEFAULT, // content
+ AOM_CS_UNKNOWN, // color space
0, // color range
0, // render width
0, // render height
- VPX_SUPERBLOCK_SIZE_DYNAMIC // superblock_size
+ AOM_SUPERBLOCK_SIZE_DYNAMIC // superblock_size
};
-struct vpx_codec_alg_priv {
- vpx_codec_priv_t base;
- vpx_codec_enc_cfg_t cfg;
- struct vp10_extracfg extra_cfg;
- VP10EncoderConfig oxcf;
- VP10_COMP *cpi;
+struct aom_codec_alg_priv {
+ aom_codec_priv_t base;
+ aom_codec_enc_cfg_t cfg;
+ struct av1_extracfg extra_cfg;
+ AV1EncoderConfig oxcf;
+ AV1_COMP *cpi;
unsigned char *cx_data;
size_t cx_data_sz;
unsigned char *pending_cx_data;
size_t pending_cx_data_sz;
int pending_frame_count;
size_t pending_frame_sizes[8];
- vpx_image_t preview_img;
- vpx_enc_frame_flags_t next_frame_flags;
- vp8_postproc_cfg_t preview_ppcfg;
- vpx_codec_pkt_list_decl(256) pkt_list;
+ aom_image_t preview_img;
+ aom_enc_frame_flags_t next_frame_flags;
+ aom_postproc_cfg_t preview_ppcfg;
+ aom_codec_pkt_list_decl(256) pkt_list;
unsigned int fixed_kf_cntr;
// BufferPool that holds all reference frames.
BufferPool *buffer_pool;
};
-static vpx_codec_err_t update_error_state(
- vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
- const vpx_codec_err_t res = error->error_code;
+static aom_codec_err_t update_error_state(
+ aom_codec_alg_priv_t *ctx, const struct aom_internal_error_info *error) {
+ const aom_codec_err_t res = error->error_code;
- if (res != VPX_CODEC_OK)
+ if (res != AOM_CODEC_OK)
ctx->base.err_detail = error->has_detail ? error->detail : NULL;
return res;
@@ -138,7 +138,7 @@
#define ERROR(str) \
do { \
ctx->base.err_detail = str; \
- return VPX_CODEC_INVALID_PARAM; \
+ return AOM_CODEC_INVALID_PARAM; \
} while (0)
#define RANGE_CHECK(p, memb, lo, hi) \
@@ -162,9 +162,9 @@
if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \
} while (0)
-static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
- const vpx_codec_enc_cfg_t *cfg,
- const struct vp10_extracfg *extra_cfg) {
+static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
+ const aom_codec_enc_cfg_t *cfg,
+ const struct av1_extracfg *extra_cfg) {
RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available
RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available
RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
@@ -178,16 +178,16 @@
RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1);
RANGE_CHECK_HI(cfg, g_threads, 64);
RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
- RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+ RANGE_CHECK(cfg, rc_end_usage, AOM_VBR, AOM_Q);
RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
- RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+ RANGE_CHECK(cfg, kf_mode, AOM_KF_DISABLED, AOM_KF_AUTO);
RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
- RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+ RANGE_CHECK(cfg, g_pass, AOM_RC_ONE_PASS, AOM_RC_LAST_PASS);
RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
if (extra_cfg->max_gf_interval > 0) {
@@ -203,9 +203,9 @@
RANGE_CHECK(cfg, rc_scaled_height, 0, cfg->g_h);
}
- // VP9 does not support a lower bound on the keyframe interval in
+ // AV1 does not support a lower bound on the keyframe interval in
// automatic keyframe placement mode.
- if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
+ if (cfg->kf_mode != AOM_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
cfg->kf_min_dist > 0)
ERROR(
"kf_min_dist not supported in auto mode, use 0 "
@@ -217,14 +217,14 @@
#endif // CONFIG_EXT_REFS
RANGE_CHECK(extra_cfg, cpu_used, -8, 8);
RANGE_CHECK_HI(extra_cfg, noise_sensitivity, 6);
- RANGE_CHECK(extra_cfg, superblock_size, VPX_SUPERBLOCK_SIZE_64X64,
- VPX_SUPERBLOCK_SIZE_DYNAMIC);
+ RANGE_CHECK(extra_cfg, superblock_size, AOM_SUPERBLOCK_SIZE_64X64,
+ AOM_SUPERBLOCK_SIZE_DYNAMIC);
#if CONFIG_EXT_TILE
// TODO(any): Warning. If CONFIG_EXT_TILE is true, tile_columns really
// means tile_width, and tile_rows really means tile_height. The interface
// should be sanitized.
#if CONFIG_EXT_PARTITION
- if (extra_cfg->superblock_size != VPX_SUPERBLOCK_SIZE_64X64) {
+ if (extra_cfg->superblock_size != AOM_SUPERBLOCK_SIZE_64X64) {
if (extra_cfg->tile_columns != UINT_MAX)
RANGE_CHECK(extra_cfg, tile_columns, 1, 32);
if (extra_cfg->tile_rows != UINT_MAX)
@@ -245,15 +245,15 @@
RANGE_CHECK(extra_cfg, arnr_max_frames, 0, 15);
RANGE_CHECK_HI(extra_cfg, arnr_strength, 6);
RANGE_CHECK(extra_cfg, cq_level, 0, 63);
- RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12);
+ RANGE_CHECK(cfg, g_bit_depth, AOM_BITS_8, AOM_BITS_12);
RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
- RANGE_CHECK(extra_cfg, content, VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
+ RANGE_CHECK(extra_cfg, content, AOM_CONTENT_DEFAULT, AOM_CONTENT_INVALID - 1);
- // TODO(yaowu): remove this when ssim tuning is implemented for vp10
- if (extra_cfg->tuning == VPX_TUNE_SSIM)
- ERROR("Option --tune=ssim is not currently supported in VP10.");
+ // TODO(yaowu): remove this when ssim tuning is implemented for av1
+ if (extra_cfg->tuning == AOM_TUNE_SSIM)
+ ERROR("Option --tune=ssim is not currently supported in AV1.");
- if (cfg->g_pass == VPX_RC_LAST_PASS) {
+ if (cfg->g_pass == AOM_RC_LAST_PASS) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
const int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
const FIRSTPASS_STATS *stats;
@@ -274,45 +274,45 @@
ERROR("rc_twopass_stats_in missing EOS stats packet");
}
-#if !CONFIG_VP9_HIGHBITDEPTH
+#if !CONFIG_AOM_HIGHBITDEPTH
if (cfg->g_profile > (unsigned int)PROFILE_1) {
ERROR("Profile > 1 not supported in this build configuration");
}
#endif
if (cfg->g_profile <= (unsigned int)PROFILE_1 &&
- cfg->g_bit_depth > VPX_BITS_8) {
+ cfg->g_bit_depth > AOM_BITS_8) {
ERROR("Codec high bit-depth not supported in profile < 2");
}
if (cfg->g_profile <= (unsigned int)PROFILE_1 && cfg->g_input_bit_depth > 8) {
ERROR("Source high bit-depth not supported in profile < 2");
}
if (cfg->g_profile > (unsigned int)PROFILE_1 &&
- cfg->g_bit_depth == VPX_BITS_8) {
+ cfg->g_bit_depth == AOM_BITS_8) {
ERROR("Codec bit-depth 8 not supported in profile > 1");
}
- RANGE_CHECK(extra_cfg, color_space, VPX_CS_UNKNOWN, VPX_CS_SRGB);
+ RANGE_CHECK(extra_cfg, color_space, AOM_CS_UNKNOWN, AOM_CS_SRGB);
RANGE_CHECK(extra_cfg, color_range, 0, 1);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
- const vpx_image_t *img) {
+static aom_codec_err_t validate_img(aom_codec_alg_priv_t *ctx,
+ const aom_image_t *img) {
switch (img->fmt) {
- case VPX_IMG_FMT_YV12:
- case VPX_IMG_FMT_I420:
- case VPX_IMG_FMT_I42016: break;
- case VPX_IMG_FMT_I422:
- case VPX_IMG_FMT_I444:
- case VPX_IMG_FMT_I440:
+ case AOM_IMG_FMT_YV12:
+ case AOM_IMG_FMT_I420:
+ case AOM_IMG_FMT_I42016: break;
+ case AOM_IMG_FMT_I422:
+ case AOM_IMG_FMT_I444:
+ case AOM_IMG_FMT_I440:
if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) {
ERROR(
"Invalid image format. I422, I444, I440 images are "
"not supported in profile.");
}
break;
- case VPX_IMG_FMT_I42216:
- case VPX_IMG_FMT_I44416:
- case VPX_IMG_FMT_I44016:
+ case AOM_IMG_FMT_I42216:
+ case AOM_IMG_FMT_I44416:
+ case AOM_IMG_FMT_I44016:
if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 &&
ctx->cfg.g_profile != (unsigned int)PROFILE_3) {
ERROR(
@@ -330,29 +330,29 @@
if (img->d_w != ctx->cfg.g_w || img->d_h != ctx->cfg.g_h)
ERROR("Image size must match encoder init configuration size");
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static int get_image_bps(const vpx_image_t *img) {
+static int get_image_bps(const aom_image_t *img) {
switch (img->fmt) {
- case VPX_IMG_FMT_YV12:
- case VPX_IMG_FMT_I420: return 12;
- case VPX_IMG_FMT_I422: return 16;
- case VPX_IMG_FMT_I444: return 24;
- case VPX_IMG_FMT_I440: return 16;
- case VPX_IMG_FMT_I42016: return 24;
- case VPX_IMG_FMT_I42216: return 32;
- case VPX_IMG_FMT_I44416: return 48;
- case VPX_IMG_FMT_I44016: return 32;
+ case AOM_IMG_FMT_YV12:
+ case AOM_IMG_FMT_I420: return 12;
+ case AOM_IMG_FMT_I422: return 16;
+ case AOM_IMG_FMT_I444: return 24;
+ case AOM_IMG_FMT_I440: return 16;
+ case AOM_IMG_FMT_I42016: return 24;
+ case AOM_IMG_FMT_I42216: return 32;
+ case AOM_IMG_FMT_I44416: return 48;
+ case AOM_IMG_FMT_I44016: return 32;
default: assert(0 && "Invalid image format"); break;
}
return 0;
}
-static vpx_codec_err_t set_encoder_config(
- VP10EncoderConfig *oxcf, const vpx_codec_enc_cfg_t *cfg,
- const struct vp10_extracfg *extra_cfg) {
- const int is_vbr = cfg->rc_end_usage == VPX_VBR;
+static aom_codec_err_t set_encoder_config(
+ AV1EncoderConfig *oxcf, const aom_codec_enc_cfg_t *cfg,
+ const struct av1_extracfg *extra_cfg) {
+ const int is_vbr = cfg->rc_end_usage == AOM_VBR;
oxcf->profile = cfg->g_profile;
oxcf->max_threads = (int)cfg->g_threads;
oxcf->width = cfg->g_w;
@@ -366,13 +366,13 @@
oxcf->mode = GOOD;
switch (cfg->g_pass) {
- case VPX_RC_ONE_PASS: oxcf->pass = 0; break;
- case VPX_RC_FIRST_PASS: oxcf->pass = 1; break;
- case VPX_RC_LAST_PASS: oxcf->pass = 2; break;
+ case AOM_RC_ONE_PASS: oxcf->pass = 0; break;
+ case AOM_RC_FIRST_PASS: oxcf->pass = 1; break;
+ case AOM_RC_LAST_PASS: oxcf->pass = 2; break;
}
oxcf->lag_in_frames =
- cfg->g_pass == VPX_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
+ cfg->g_pass == AOM_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
oxcf->rc_mode = cfg->rc_end_usage;
// Convert target bandwidth from Kbit/s to Bit/s
@@ -382,10 +382,10 @@
oxcf->gf_cbr_boost_pct = extra_cfg->gf_cbr_boost_pct;
oxcf->best_allowed_q =
- extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer);
+ extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_min_quantizer);
oxcf->worst_allowed_q =
- extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer);
- oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
+ extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_max_quantizer);
+ oxcf->cq_level = av1_quantizer_to_qindex(extra_cfg->cq_level);
oxcf->fixed_q = -1;
#if CONFIG_AOM_QM
@@ -419,7 +419,7 @@
oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
oxcf->auto_key =
- cfg->kf_mode == VPX_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
+ cfg->kf_mode == AOM_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
oxcf->key_freq = cfg->kf_max_dist;
@@ -458,12 +458,12 @@
{
#if CONFIG_EXT_PARTITION
const unsigned int max =
- extra_cfg->superblock_size == VPX_SUPERBLOCK_SIZE_64X64 ? 64 : 32;
+ extra_cfg->superblock_size == AOM_SUPERBLOCK_SIZE_64X64 ? 64 : 32;
#else
const unsigned int max = 64;
#endif // CONFIG_EXT_PARTITION
- oxcf->tile_columns = VPXMIN(extra_cfg->tile_columns, max);
- oxcf->tile_rows = VPXMIN(extra_cfg->tile_rows, max);
+ oxcf->tile_columns = AOMMIN(extra_cfg->tile_columns, max);
+ oxcf->tile_rows = AOMMIN(extra_cfg->tile_rows, max);
}
#else
oxcf->tile_columns = extra_cfg->tile_columns;
@@ -478,7 +478,7 @@
oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
/*
- printf("Current VP9 Settings: \n");
+ printf("Current AV1 Settings: \n");
printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
printf("sharpness: %d\n", oxcf->sharpness);
@@ -509,16 +509,16 @@
printf("frame parallel detokenization: %d\n",
oxcf->frame_parallel_decoding_mode);
*/
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
- const vpx_codec_enc_cfg_t *cfg) {
- vpx_codec_err_t res;
+static aom_codec_err_t encoder_set_config(aom_codec_alg_priv_t *ctx,
+ const aom_codec_enc_cfg_t *cfg) {
+ aom_codec_err_t res;
int force_key = 0;
if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) {
- if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS)
+ if (cfg->g_lag_in_frames > 1 || cfg->g_pass != AOM_RC_ONE_PASS)
ERROR("Cannot change width or height after initialization");
if (!valid_ref_frame_size(ctx->cfg.g_w, ctx->cfg.g_h, cfg->g_w, cfg->g_h) ||
(ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) ||
@@ -535,246 +535,246 @@
res = validate_config(ctx, cfg, &ctx->extra_cfg);
- if (res == VPX_CODEC_OK) {
+ if (res == AOM_CODEC_OK) {
ctx->cfg = *cfg;
set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
// On profile change, request a key frame
force_key |= ctx->cpi->common.profile != ctx->oxcf.profile;
- vp10_change_config(ctx->cpi, &ctx->oxcf);
+ av1_change_config(ctx->cpi, &ctx->oxcf);
}
- if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
+ if (force_key) ctx->next_frame_flags |= AOM_EFLAG_FORCE_KF;
return res;
}
-static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_quantizer(aom_codec_alg_priv_t *ctx,
va_list args) {
int *const arg = va_arg(args, int *);
- if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
- *arg = vp10_get_quantizer(ctx->cpi);
- return VPX_CODEC_OK;
+ if (arg == NULL) return AOM_CODEC_INVALID_PARAM;
+ *arg = av1_get_quantizer(ctx->cpi);
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_quantizer64(aom_codec_alg_priv_t *ctx,
va_list args) {
int *const arg = va_arg(args, int *);
- if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
- *arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi));
- return VPX_CODEC_OK;
+ if (arg == NULL) return AOM_CODEC_INVALID_PARAM;
+ *arg = av1_qindex_to_quantizer(av1_get_quantizer(ctx->cpi));
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t update_extra_cfg(vpx_codec_alg_priv_t *ctx,
- const struct vp10_extracfg *extra_cfg) {
- const vpx_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg);
- if (res == VPX_CODEC_OK) {
+static aom_codec_err_t update_extra_cfg(aom_codec_alg_priv_t *ctx,
+ const struct av1_extracfg *extra_cfg) {
+ const aom_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg);
+ if (res == AOM_CODEC_OK) {
ctx->extra_cfg = *extra_cfg;
set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
- vp10_change_config(ctx->cpi, &ctx->oxcf);
+ av1_change_config(ctx->cpi, &ctx->oxcf);
}
return res;
}
-static vpx_codec_err_t ctrl_set_cpuused(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_cpuused(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.cpu_used = CAST(AOME_SET_CPUUSED, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_enable_auto_alt_ref(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_enable_auto_alt_ref(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.enable_auto_alt_ref = CAST(AOME_SET_ENABLEAUTOALTREF, args);
return update_extra_cfg(ctx, &extra_cfg);
}
#if CONFIG_EXT_REFS
-static vpx_codec_err_t ctrl_set_enable_auto_bwd_ref(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_enable_auto_bwd_ref(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.enable_auto_bwd_ref = CAST(VP8E_SET_ENABLEAUTOBWDREF, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.enable_auto_bwd_ref = CAST(AOME_SET_ENABLEAUTOBWDREF, args);
return update_extra_cfg(ctx, &extra_cfg);
}
#endif // CONFIG_EXT_REFS
-static vpx_codec_err_t ctrl_set_noise_sensitivity(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_noise_sensitivity(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.noise_sensitivity = CAST(VP9E_SET_NOISE_SENSITIVITY, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.noise_sensitivity = CAST(AV1E_SET_NOISE_SENSITIVITY, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_sharpness(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_sharpness(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.sharpness = CAST(VP8E_SET_SHARPNESS, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.sharpness = CAST(AOME_SET_SHARPNESS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_static_thresh(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_static_thresh(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.static_thresh = CAST(AOME_SET_STATIC_THRESHOLD, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_tile_columns(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tile_columns(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.tile_columns = CAST(VP9E_SET_TILE_COLUMNS, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.tile_columns = CAST(AV1E_SET_TILE_COLUMNS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_tile_rows(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tile_rows(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.tile_rows = CAST(VP9E_SET_TILE_ROWS, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.tile_rows = CAST(AV1E_SET_TILE_ROWS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_arnr_max_frames(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_arnr_max_frames(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.arnr_max_frames = CAST(AOME_SET_ARNR_MAXFRAMES, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_arnr_strength(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_arnr_strength(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.arnr_strength = CAST(AOME_SET_ARNR_STRENGTH, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_arnr_type(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_arnr_type(aom_codec_alg_priv_t *ctx,
va_list args) {
(void)ctx;
(void)args;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_tuning(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tuning(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.tuning = CAST(VP8E_SET_TUNING, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.tuning = CAST(AOME_SET_TUNING, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_cq_level(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_cq_level(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.cq_level = CAST(AOME_SET_CQ_LEVEL, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
- vpx_codec_alg_priv_t *ctx, va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+static aom_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
+ aom_codec_alg_priv_t *ctx, va_list args) {
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.rc_max_intra_bitrate_pct =
- CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args);
+ CAST(AOME_SET_MAX_INTRA_BITRATE_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
- vpx_codec_alg_priv_t *ctx, va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+static aom_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
+ aom_codec_alg_priv_t *ctx, va_list args) {
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.rc_max_inter_bitrate_pct =
- CAST(VP8E_SET_MAX_INTER_BITRATE_PCT, args);
+ CAST(AOME_SET_MAX_INTER_BITRATE_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.gf_cbr_boost_pct = CAST(AV1E_SET_GF_CBR_BOOST_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_lossless(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_lossless(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.lossless = CAST(VP9E_SET_LOSSLESS, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.lossless = CAST(AV1E_SET_LOSSLESS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
#if CONFIG_AOM_QM
-static vpx_codec_err_t ctrl_set_enable_qm(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_enable_qm(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.enable_qm = CAST(VP9E_SET_ENABLE_QM, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.enable_qm = CAST(AV1E_SET_ENABLE_QM, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_qm_min(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_qm_min(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.qm_min = CAST(VP9E_SET_QM_MIN, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.qm_min = CAST(AV1E_SET_QM_MIN, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_qm_max(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_qm_max(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.qm_max = CAST(VP9E_SET_QM_MAX, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.qm_max = CAST(AV1E_SET_QM_MAX, args);
return update_extra_cfg(ctx, &extra_cfg);
}
#endif
-static vpx_codec_err_t ctrl_set_frame_parallel_decoding_mode(
- vpx_codec_alg_priv_t *ctx, va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+static aom_codec_err_t ctrl_set_frame_parallel_decoding_mode(
+ aom_codec_alg_priv_t *ctx, va_list args) {
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.frame_parallel_decoding_mode =
- CAST(VP9E_SET_FRAME_PARALLEL_DECODING, args);
+ CAST(AV1E_SET_FRAME_PARALLEL_DECODING, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_aq_mode(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_aq_mode(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.aq_mode = CAST(VP9E_SET_AQ_MODE, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.aq_mode = CAST(AV1E_SET_AQ_MODE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_min_gf_interval(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_min_gf_interval(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.min_gf_interval = CAST(VP9E_SET_MIN_GF_INTERVAL, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.min_gf_interval = CAST(AV1E_SET_MIN_GF_INTERVAL, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_max_gf_interval(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_max_gf_interval(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.max_gf_interval = CAST(VP9E_SET_MAX_GF_INTERVAL, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.max_gf_interval = CAST(AV1E_SET_MAX_GF_INTERVAL, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_frame_periodic_boost(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_frame_periodic_boost(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.frame_periodic_boost = CAST(VP9E_SET_FRAME_PERIODIC_BOOST, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.frame_periodic_boost = CAST(AV1E_SET_FRAME_PERIODIC_BOOST, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
- vpx_codec_priv_enc_mr_cfg_t *data) {
- vpx_codec_err_t res = VPX_CODEC_OK;
+static aom_codec_err_t encoder_init(aom_codec_ctx_t *ctx,
+ aom_codec_priv_enc_mr_cfg_t *data) {
+ aom_codec_err_t res = AOM_CODEC_OK;
(void)data;
if (ctx->priv == NULL) {
- vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv));
- if (priv == NULL) return VPX_CODEC_MEM_ERROR;
+ aom_codec_alg_priv_t *const priv = aom_calloc(1, sizeof(*priv));
+ if (priv == NULL) return AOM_CODEC_MEM_ERROR;
- ctx->priv = (vpx_codec_priv_t *)priv;
+ ctx->priv = (aom_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
ctx->priv->enc.total_encoders = 1;
- priv->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (priv->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
+ priv->buffer_pool = (BufferPool *)aom_calloc(1, sizeof(BufferPool));
+ if (priv->buffer_pool == NULL) return AOM_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) {
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
#endif
@@ -785,19 +785,19 @@
}
priv->extra_cfg = default_extra_cfg;
- once(vp10_initialize_enc);
+ once(av1_initialize_enc);
res = validate_config(priv, &priv->cfg, &priv->extra_cfg);
- if (res == VPX_CODEC_OK) {
+ if (res == AOM_CODEC_OK) {
set_encoder_config(&priv->oxcf, &priv->cfg, &priv->extra_cfg);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
priv->oxcf.use_highbitdepth =
- (ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
+ (ctx->init_flags & AOM_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
#endif
- priv->cpi = vp10_create_compressor(&priv->oxcf, priv->buffer_pool);
+ priv->cpi = av1_create_compressor(&priv->oxcf, priv->buffer_pool);
if (priv->cpi == NULL)
- res = VPX_CODEC_MEM_ERROR;
+ res = AOM_CODEC_MEM_ERROR;
else
priv->cpi->output_pkt_list = &priv->pkt_list.head;
}
@@ -806,26 +806,26 @@
return res;
}
-static vpx_codec_err_t encoder_destroy(vpx_codec_alg_priv_t *ctx) {
+static aom_codec_err_t encoder_destroy(aom_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
- vp10_remove_compressor(ctx->cpi);
+ av1_remove_compressor(ctx->cpi);
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
#endif
- vpx_free(ctx->buffer_pool);
- vpx_free(ctx);
- return VPX_CODEC_OK;
+ aom_free(ctx->buffer_pool);
+ aom_free(ctx);
+ return AOM_CODEC_OK;
}
-static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
+static void pick_quickcompress_mode(aom_codec_alg_priv_t *ctx,
unsigned long duration,
unsigned long deadline) {
MODE new_mode = BEST;
switch (ctx->cfg.g_pass) {
- case VPX_RC_ONE_PASS:
+ case AOM_RC_ONE_PASS:
if (deadline > 0) {
- const vpx_codec_enc_cfg_t *const cfg = &ctx->cfg;
+ const aom_codec_enc_cfg_t *const cfg = &ctx->cfg;
// Convert duration parameter from stream timebase to microseconds.
const uint64_t duration_us = (uint64_t)duration * 1000000 *
@@ -839,19 +839,19 @@
new_mode = BEST;
}
break;
- case VPX_RC_FIRST_PASS: break;
- case VPX_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
+ case AOM_RC_FIRST_PASS: break;
+ case AOM_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
}
if (ctx->oxcf.mode != new_mode) {
ctx->oxcf.mode = new_mode;
- vp10_change_config(ctx->cpi, &ctx->oxcf);
+ av1_change_config(ctx->cpi, &ctx->oxcf);
}
}
// Turn on to test if supplemental superframe data breaks decoding
// #define TEST_SUPPLEMENTAL_SUPERFRAME_DATA
-static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
+static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
uint8_t marker = 0xc0;
unsigned int mask;
int mag, index_sz;
@@ -916,50 +916,50 @@
return index_sz;
}
-// vp9 uses 10,000,000 ticks/second as time stamp
+// av1 uses 10,000,000 ticks/second as time stamp
#define TICKS_PER_SEC 10000000LL
-static int64_t timebase_units_to_ticks(const vpx_rational_t *timebase,
+static int64_t timebase_units_to_ticks(const aom_rational_t *timebase,
int64_t n) {
return n * TICKS_PER_SEC * timebase->num / timebase->den;
}
-static int64_t ticks_to_timebase_units(const vpx_rational_t *timebase,
+static int64_t ticks_to_timebase_units(const aom_rational_t *timebase,
int64_t n) {
const int64_t round = TICKS_PER_SEC * timebase->num / 2 - 1;
return (n * timebase->den + round) / timebase->num / TICKS_PER_SEC;
}
-static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP10_COMP *cpi,
+static aom_codec_frame_flags_t get_frame_pkt_flags(const AV1_COMP *cpi,
unsigned int lib_flags) {
- vpx_codec_frame_flags_t flags = lib_flags << 16;
+ aom_codec_frame_flags_t flags = lib_flags << 16;
- if (lib_flags & FRAMEFLAGS_KEY) flags |= VPX_FRAME_IS_KEY;
+ if (lib_flags & FRAMEFLAGS_KEY) flags |= AOM_FRAME_IS_KEY;
- if (cpi->droppable) flags |= VPX_FRAME_IS_DROPPABLE;
+ if (cpi->droppable) flags |= AOM_FRAME_IS_DROPPABLE;
return flags;
}
-static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
+static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
+ const aom_image_t *img,
+ aom_codec_pts_t pts,
unsigned long duration,
- vpx_enc_frame_flags_t enc_flags,
+ aom_enc_frame_flags_t enc_flags,
unsigned long deadline) {
- volatile vpx_codec_err_t res = VPX_CODEC_OK;
- volatile vpx_enc_frame_flags_t flags = enc_flags;
- VP10_COMP *const cpi = ctx->cpi;
- const vpx_rational_t *const timebase = &ctx->cfg.g_timebase;
+ volatile aom_codec_err_t res = AOM_CODEC_OK;
+ volatile aom_enc_frame_flags_t flags = enc_flags;
+ AV1_COMP *const cpi = ctx->cpi;
+ const aom_rational_t *const timebase = &ctx->cfg.g_timebase;
size_t data_sz;
- if (cpi == NULL) return VPX_CODEC_INVALID_PARAM;
+ if (cpi == NULL) return AOM_CODEC_INVALID_PARAM;
if (img != NULL) {
res = validate_img(ctx, img);
// TODO(jzern) the checks related to cpi's validity should be treated as a
// failure condition, encoder setup is done fully in init() currently.
- if (res == VPX_CODEC_OK) {
+ if (res == AOM_CODEC_OK) {
#if CONFIG_EXT_REFS
data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img);
#else
@@ -974,42 +974,42 @@
free(ctx->cx_data);
ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz);
if (ctx->cx_data == NULL) {
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
}
}
}
pick_quickcompress_mode(ctx, duration, deadline);
- vpx_codec_pkt_list_init(&ctx->pkt_list);
+ aom_codec_pkt_list_init(&ctx->pkt_list);
// Handle Flags
- if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) ||
- ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+ if (((flags & AOM_EFLAG_NO_UPD_GF) && (flags & AOM_EFLAG_FORCE_GF)) ||
+ ((flags & AOM_EFLAG_NO_UPD_ARF) && (flags & AOM_EFLAG_FORCE_ARF))) {
ctx->base.err_detail = "Conflicting flags.";
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
if (setjmp(cpi->common.error.jmp)) {
cpi->common.error.setjmp = 0;
res = update_error_state(ctx, &cpi->common.error);
- vpx_clear_system_state();
+ aom_clear_system_state();
return res;
}
cpi->common.error.setjmp = 1;
- vp10_apply_encoding_flags(cpi, flags);
+ av1_apply_encoding_flags(cpi, flags);
// Handle fixed keyframe intervals
- if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
+ if (ctx->cfg.kf_mode == AOM_KF_AUTO &&
ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) {
- flags |= VPX_EFLAG_FORCE_KF;
+ flags |= AOM_EFLAG_FORCE_KF;
ctx->fixed_kf_cntr = 1;
}
}
- if (res == VPX_CODEC_OK) {
+ if (res == AOM_CODEC_OK) {
unsigned int lib_flags = 0;
YV12_BUFFER_CONFIG sd;
int64_t dst_time_stamp = timebase_units_to_ticks(timebase, pts);
@@ -1019,15 +1019,15 @@
unsigned char *cx_data;
// Set up internal flags
- if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
+ if (ctx->base.init_flags & AOM_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
if (img != NULL) {
res = image2yuvconfig(img, &sd);
// Store the original flags in to the frame buffer. Will extract the
// key frame flag when we actually encode this frame.
- if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
- dst_time_stamp, dst_end_time_stamp)) {
+ if (av1_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+ dst_time_stamp, dst_end_time_stamp)) {
res = update_error_state(ctx, &cpi->common.error);
}
ctx->next_frame_flags = 0;
@@ -1047,18 +1047,18 @@
* the buffer size anyway.
*/
if (cx_data_sz < ctx->cx_data_sz / 2) {
- vpx_internal_error(&cpi->common.error, VPX_CODEC_ERROR,
+ aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR,
"Compressed data buffer too small");
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
while (cx_data_sz >= ctx->cx_data_sz / 2 &&
- -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, cx_data,
- &dst_time_stamp, &dst_end_time_stamp,
- !img)) {
+ -1 != av1_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+ &dst_time_stamp, &dst_end_time_stamp,
+ !img)) {
if (size) {
- vpx_codec_cx_pkt_t pkt;
+ aom_codec_cx_pkt_t pkt;
// Pack invisible frames with the next visible frame
if (!cpi->common.show_frame) {
@@ -1072,7 +1072,7 @@
}
// Add the frame packet to the list of returned packets.
- pkt.kind = VPX_CODEC_CX_FRAME_PKT;
+ pkt.kind = AOM_CODEC_CX_FRAME_PKT;
pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp);
pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
timebase, dst_end_time_stamp - dst_time_stamp);
@@ -1093,7 +1093,7 @@
}
pkt.data.frame.partition_id = -1;
- vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ aom_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
cx_data += size;
cx_data_sz -= size;
@@ -1105,87 +1105,87 @@
return res;
}
-static const vpx_codec_cx_pkt_t *encoder_get_cxdata(vpx_codec_alg_priv_t *ctx,
- vpx_codec_iter_t *iter) {
- return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
+static const aom_codec_cx_pkt_t *encoder_get_cxdata(aom_codec_alg_priv_t *ctx,
+ aom_codec_iter_t *iter) {
+ return aom_codec_pkt_list_get(&ctx->pkt_list.head, iter);
}
-static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *);
+ aom_ref_frame_t *const frame = va_arg(args, aom_ref_frame_t *);
if (frame != NULL) {
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- vp10_set_reference_enc(ctx->cpi,
- ref_frame_to_vp10_reframe(frame->frame_type), &sd);
- return VPX_CODEC_OK;
+ av1_set_reference_enc(ctx->cpi, ref_frame_to_av1_reframe(frame->frame_type),
+ &sd);
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *);
+ aom_ref_frame_t *const frame = va_arg(args, aom_ref_frame_t *);
if (frame != NULL) {
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- vp10_copy_reference_enc(ctx->cpi,
- ref_frame_to_vp10_reframe(frame->frame_type), &sd);
- return VPX_CODEC_OK;
+ av1_copy_reference_enc(ctx->cpi,
+ ref_frame_to_av1_reframe(frame->frame_type), &sd);
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
- vp9_ref_frame_t *const frame = va_arg(args, vp9_ref_frame_t *);
+ av1_ref_frame_t *const frame = va_arg(args, av1_ref_frame_t *);
if (frame != NULL) {
YV12_BUFFER_CONFIG *fb = get_ref_frame(&ctx->cpi->common, frame->idx);
- if (fb == NULL) return VPX_CODEC_ERROR;
+ if (fb == NULL) return AOM_CODEC_ERROR;
yuvconfig2image(&frame->img, fb, NULL);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_get_new_frame_image(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_new_frame_image(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_image_t *const new_img = va_arg(args, vpx_image_t *);
+ aom_image_t *const new_img = va_arg(args, aom_image_t *);
if (new_img != NULL) {
YV12_BUFFER_CONFIG new_frame;
- if (vp10_get_last_show_frame(ctx->cpi, &new_frame) == 0) {
+ if (av1_get_last_show_frame(ctx->cpi, &new_frame) == 0) {
yuvconfig2image(new_img, &new_frame, NULL);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_set_previewpp(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_previewpp(aom_codec_alg_priv_t *ctx,
va_list args) {
(void)ctx;
(void)args;
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
-static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) {
+static aom_image_t *encoder_get_preview(aom_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
- if (vp10_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
+ if (av1_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
yuvconfig2image(&ctx->preview_img, &sd, NULL);
return &ctx->preview_img;
} else {
@@ -1193,160 +1193,160 @@
}
}
-static vpx_codec_err_t ctrl_use_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_use_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
const int reference_flag = va_arg(args, int);
- vp10_use_as_reference(ctx->cpi, reference_flag);
- return VPX_CODEC_OK;
+ av1_use_as_reference(ctx->cpi, reference_flag);
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_roi_map(aom_codec_alg_priv_t *ctx,
va_list args) {
(void)ctx;
(void)args;
- // TODO(yaowu): Need to re-implement and test for VP9.
- return VPX_CODEC_INVALID_PARAM;
+ // TODO(yaowu): Need to re-implement and test for AV1.
+ return AOM_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_active_map(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
+ aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
if (map) {
- if (!vp10_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
- (int)map->cols))
- return VPX_CODEC_OK;
+ if (!av1_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ (int)map->cols))
+ return AOM_CODEC_OK;
else
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_get_active_map(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_active_map(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
+ aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
if (map) {
- if (!vp10_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
- (int)map->cols))
- return VPX_CODEC_OK;
+ if (!av1_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ (int)map->cols))
+ return AOM_CODEC_OK;
else
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_scale_mode(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *);
+ aom_scaling_mode_t *const mode = va_arg(args, aom_scaling_mode_t *);
if (mode) {
const int res =
- vp10_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
- (VPX_SCALING)mode->v_scaling_mode);
- return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
+ av1_set_internal_size(ctx->cpi, (AOM_SCALING)mode->h_scaling_mode,
+ (AOM_SCALING)mode->v_scaling_mode);
+ return (res == 0) ? AOM_CODEC_OK : AOM_CODEC_INVALID_PARAM;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_set_tune_content(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tune_content(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.content = CAST(VP9E_SET_TUNE_CONTENT, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.content = CAST(AV1E_SET_TUNE_CONTENT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_color_space(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_color_space(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.color_space = CAST(VP9E_SET_COLOR_SPACE, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.color_space = CAST(AV1E_SET_COLOR_SPACE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_color_range(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_color_range(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.color_range = CAST(VP9E_SET_COLOR_RANGE, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.color_range = CAST(AV1E_SET_COLOR_RANGE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_render_size(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
int *const render_size = va_arg(args, int *);
extra_cfg.render_width = render_size[0];
extra_cfg.render_height = render_size[1];
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_superblock_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_superblock_size(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.superblock_size = CAST(VP10E_SET_SUPERBLOCK_SIZE, args);
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.superblock_size = CAST(AV1E_SET_SUPERBLOCK_SIZE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
- { VP8_COPY_REFERENCE, ctrl_copy_reference },
- { VP8E_USE_REFERENCE, ctrl_use_reference },
+static aom_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
+ { AOM_COPY_REFERENCE, ctrl_copy_reference },
+ { AOME_USE_REFERENCE, ctrl_use_reference },
// Setters
- { VP8_SET_REFERENCE, ctrl_set_reference },
- { VP8_SET_POSTPROC, ctrl_set_previewpp },
- { VP8E_SET_ROI_MAP, ctrl_set_roi_map },
- { VP8E_SET_ACTIVEMAP, ctrl_set_active_map },
- { VP8E_SET_SCALEMODE, ctrl_set_scale_mode },
- { VP8E_SET_CPUUSED, ctrl_set_cpuused },
- { VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
+ { AOM_SET_REFERENCE, ctrl_set_reference },
+ { AOM_SET_POSTPROC, ctrl_set_previewpp },
+ { AOME_SET_ROI_MAP, ctrl_set_roi_map },
+ { AOME_SET_ACTIVEMAP, ctrl_set_active_map },
+ { AOME_SET_SCALEMODE, ctrl_set_scale_mode },
+ { AOME_SET_CPUUSED, ctrl_set_cpuused },
+ { AOME_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
#if CONFIG_EXT_REFS
- { VP8E_SET_ENABLEAUTOBWDREF, ctrl_set_enable_auto_bwd_ref },
+ { AOME_SET_ENABLEAUTOBWDREF, ctrl_set_enable_auto_bwd_ref },
#endif // CONFIG_EXT_REFS
- { VP8E_SET_SHARPNESS, ctrl_set_sharpness },
- { VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
- { VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
- { VP9E_SET_TILE_ROWS, ctrl_set_tile_rows },
- { VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
- { VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
- { VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type },
- { VP8E_SET_TUNING, ctrl_set_tuning },
- { VP8E_SET_CQ_LEVEL, ctrl_set_cq_level },
- { VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
- { VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
- { VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
- { VP9E_SET_LOSSLESS, ctrl_set_lossless },
+ { AOME_SET_SHARPNESS, ctrl_set_sharpness },
+ { AOME_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
+ { AV1E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
+ { AV1E_SET_TILE_ROWS, ctrl_set_tile_rows },
+ { AOME_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
+ { AOME_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
+ { AOME_SET_ARNR_TYPE, ctrl_set_arnr_type },
+ { AOME_SET_TUNING, ctrl_set_tuning },
+ { AOME_SET_CQ_LEVEL, ctrl_set_cq_level },
+ { AOME_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
+ { AV1E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
+ { AV1E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
+ { AV1E_SET_LOSSLESS, ctrl_set_lossless },
#if CONFIG_AOM_QM
- { VP9E_SET_ENABLE_QM, ctrl_set_enable_qm },
- { VP9E_SET_QM_MIN, ctrl_set_qm_min },
- { VP9E_SET_QM_MAX, ctrl_set_qm_max },
+ { AV1E_SET_ENABLE_QM, ctrl_set_enable_qm },
+ { AV1E_SET_QM_MIN, ctrl_set_qm_min },
+ { AV1E_SET_QM_MAX, ctrl_set_qm_max },
#endif
- { VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
- { VP9E_SET_AQ_MODE, ctrl_set_aq_mode },
- { VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
- { VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content },
- { VP9E_SET_COLOR_SPACE, ctrl_set_color_space },
- { VP9E_SET_COLOR_RANGE, ctrl_set_color_range },
- { VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
- { VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
- { VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
- { VP9E_SET_RENDER_SIZE, ctrl_set_render_size },
- { VP10E_SET_SUPERBLOCK_SIZE, ctrl_set_superblock_size },
+ { AV1E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
+ { AV1E_SET_AQ_MODE, ctrl_set_aq_mode },
+ { AV1E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
+ { AV1E_SET_TUNE_CONTENT, ctrl_set_tune_content },
+ { AV1E_SET_COLOR_SPACE, ctrl_set_color_space },
+ { AV1E_SET_COLOR_RANGE, ctrl_set_color_range },
+ { AV1E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
+ { AV1E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
+ { AV1E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
+ { AV1E_SET_RENDER_SIZE, ctrl_set_render_size },
+ { AV1E_SET_SUPERBLOCK_SIZE, ctrl_set_superblock_size },
// Getters
- { VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer },
- { VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
- { VP9_GET_REFERENCE, ctrl_get_reference },
- { VP9E_GET_ACTIVEMAP, ctrl_get_active_map },
- { VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
+ { AOME_GET_LAST_QUANTIZER, ctrl_get_quantizer },
+ { AOME_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
+ { AV1_GET_REFERENCE, ctrl_get_reference },
+ { AV1E_GET_ACTIVEMAP, ctrl_get_active_map },
+ { AV1_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
{ -1, NULL },
};
-static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
+static aom_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
{ 0,
{
// NOLINT
@@ -1356,14 +1356,14 @@
320, // g_width
240, // g_height
- VPX_BITS_8, // g_bit_depth
+ AOM_BITS_8, // g_bit_depth
8, // g_input_bit_depth
{ 1, 30 }, // g_timebase
0, // g_error_resilient
- VPX_RC_ONE_PASS, // g_pass
+ AOM_RC_ONE_PASS, // g_pass
25, // g_lag_in_frames
@@ -1374,7 +1374,7 @@
60, // rc_resize_down_thresh
30, // rc_resize_up_thresh
- VPX_VBR, // rc_end_usage
+ AOM_VBR, // rc_end_usage
{ NULL, 0 }, // rc_twopass_stats_in
{ NULL, 0 }, // rc_firstpass_mb_stats_in
256, // rc_target_bandwidth
@@ -1392,7 +1392,7 @@
2000, // rc_two_pass_vbrmax_section
// keyframing settings (kf)
- VPX_KF_AUTO, // g_kfmode
+ AOM_KF_AUTO, // g_kfmode
0, // kf_min_dist
9999, // kf_max_dist
} },
@@ -1401,33 +1401,33 @@
#ifndef VERSION_STRING
#define VERSION_STRING
#endif
-CODEC_INTERFACE(vpx_codec_vp10_cx) = {
- "WebM Project VP10 Encoder" VERSION_STRING,
- VPX_CODEC_INTERNAL_ABI_VERSION,
-#if CONFIG_VP9_HIGHBITDEPTH
- VPX_CODEC_CAP_HIGHBITDEPTH |
+CODEC_INTERFACE(aom_codec_av1_cx) = {
+ "AOMedia Project AV1 Encoder" VERSION_STRING,
+ AOM_CODEC_INTERNAL_ABI_VERSION,
+#if CONFIG_AOM_HIGHBITDEPTH
+ AOM_CODEC_CAP_HIGHBITDEPTH |
#endif
- VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t
- encoder_init, // vpx_codec_init_fn_t
- encoder_destroy, // vpx_codec_destroy_fn_t
- encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
+ AOM_CODEC_CAP_ENCODER | AOM_CODEC_CAP_PSNR, // aom_codec_caps_t
+ encoder_init, // aom_codec_init_fn_t
+ encoder_destroy, // aom_codec_destroy_fn_t
+ encoder_ctrl_maps, // aom_codec_ctrl_fn_map_t
{
// NOLINT
- NULL, // vpx_codec_peek_si_fn_t
- NULL, // vpx_codec_get_si_fn_t
- NULL, // vpx_codec_decode_fn_t
- NULL, // vpx_codec_frame_get_fn_t
- NULL // vpx_codec_set_fb_fn_t
+ NULL, // aom_codec_peek_si_fn_t
+ NULL, // aom_codec_get_si_fn_t
+ NULL, // aom_codec_decode_fn_t
+ NULL, // aom_codec_frame_get_fn_t
+ NULL // aom_codec_set_fb_fn_t
},
{
// NOLINT
1, // 1 cfg map
- encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t
- encoder_encode, // vpx_codec_encode_fn_t
- encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t
- encoder_set_config, // vpx_codec_enc_config_set_fn_t
- NULL, // vpx_codec_get_global_headers_fn_t
- encoder_get_preview, // vpx_codec_get_preview_frame_fn_t
- NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
+ encoder_usage_cfg_map, // aom_codec_enc_cfg_map_t
+ encoder_encode, // aom_codec_encode_fn_t
+ encoder_get_cxdata, // aom_codec_get_cx_data_fn_t
+ encoder_set_config, // aom_codec_enc_config_set_fn_t
+ NULL, // aom_codec_get_global_headers_fn_t
+ encoder_get_preview, // aom_codec_get_preview_frame_fn_t
+ NULL // aom_codec_enc_mr_get_mem_loc_fn_t
}
};
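
Usage sketch (illustrative only, not part of this change): with the interface
renamed to aom_codec_av1_cx(), a caller reaches the defaults in
encoder_usage_cfg_map above through the renamed public wrapper API. This is a
minimal sketch assuming the renamed headers aom/aom_encoder.h and aom/aomcx.h;
error handling is trimmed.

#include "aom/aom_encoder.h"
#include "aom/aomcx.h"

// Sketch only: open an AV1 encoder using the usage-0 defaults shown above
// (320x240, {1,30} timebase, AOM_VBR), overriding the frame size.
static aom_codec_err_t open_av1_encoder(aom_codec_ctx_t *codec,
                                        unsigned int w, unsigned int h) {
  aom_codec_enc_cfg_t cfg;
  const aom_codec_err_t res =
      aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg, 0);
  if (res != AOM_CODEC_OK) return res;
  cfg.g_w = w;  // overrides the 320 default from encoder_usage_cfg_map
  cfg.g_h = h;  // overrides the 240 default
  return aom_codec_enc_init(codec, aom_codec_av1_cx(), &cfg, 0);
}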
diff --git a/av1/vp10_dx_iface.c b/av1/av1_dx_iface.c
similarity index 65%
rename from av1/vp10_dx_iface.c
rename to av1/av1_dx_iface.c
index 9e17c5a..53f7d46 100644
--- a/av1/vp10_dx_iface.c
+++ b/av1/av1_dx_iface.c
@@ -11,15 +11,15 @@
#include <stdlib.h>
#include <string.h>
-#include "./vpx_config.h"
-#include "./vpx_version.h"
+#include "./aom_config.h"
+#include "./aom_version.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom/vp8dx.h"
-#include "aom/vpx_decoder.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom/aomdx.h"
+#include "aom/aom_decoder.h"
#include "aom_dsp/bitreader_buffer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/frame_buffers.h"
@@ -28,9 +28,9 @@
#include "av1/decoder/decoder.h"
#include "av1/decoder/decodeframe.h"
-#include "av1/vp10_iface_common.h"
+#include "av1/av1_iface_common.h"
-typedef vpx_codec_stream_info_t vp10_stream_info_t;
+typedef aom_codec_stream_info_t av1_stream_info_t;
// This limit is due to the number of frame buffers.
// TODO(hkuang): Remove this limit after implementing on-demand framebuffers.
@@ -38,18 +38,18 @@
typedef struct cache_frame {
int fb_idx;
- vpx_image_t img;
+ aom_image_t img;
} cache_frame;
-struct vpx_codec_alg_priv {
- vpx_codec_priv_t base;
- vpx_codec_dec_cfg_t cfg;
- vp10_stream_info_t si;
+struct aom_codec_alg_priv {
+ aom_codec_priv_t base;
+ aom_codec_dec_cfg_t cfg;
+ av1_stream_info_t si;
int postproc_cfg_set;
- vp8_postproc_cfg_t postproc_cfg;
- vpx_decrypt_cb decrypt_cb;
+ aom_postproc_cfg_t postproc_cfg;
+ aom_decrypt_cb decrypt_cb;
void *decrypt_state;
- vpx_image_t img;
+ aom_image_t img;
int img_avail;
int flushed;
int invert_tile_order;
@@ -61,7 +61,7 @@
// Frame parallel related.
int frame_parallel_decode; // frame-based threading.
- VPxWorker *frame_workers;
+ AVxWorker *frame_workers;
int num_frame_workers;
int next_submit_worker_id;
int last_submit_worker_id;
@@ -75,32 +75,32 @@
// BufferPool that holds all reference frames. Shared by all the FrameWorkers.
BufferPool *buffer_pool;
- // External frame buffer info to save for VP10 common.
+ // External frame buffer info to save for AV1 common.
void *ext_priv; // Private data associated with the external frame buffers.
- vpx_get_frame_buffer_cb_fn_t get_ext_fb_cb;
- vpx_release_frame_buffer_cb_fn_t release_ext_fb_cb;
+ aom_get_frame_buffer_cb_fn_t get_ext_fb_cb;
+ aom_release_frame_buffer_cb_fn_t release_ext_fb_cb;
};
-static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
- vpx_codec_priv_enc_mr_cfg_t *data) {
- // This function only allocates space for the vpx_codec_alg_priv_t
+static aom_codec_err_t decoder_init(aom_codec_ctx_t *ctx,
+ aom_codec_priv_enc_mr_cfg_t *data) {
+ // This function only allocates space for the aom_codec_alg_priv_t
// structure. More memory may be required at the time the stream
// information becomes known.
(void)data;
if (!ctx->priv) {
- vpx_codec_alg_priv_t *const priv =
- (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
- if (priv == NULL) return VPX_CODEC_MEM_ERROR;
+ aom_codec_alg_priv_t *const priv =
+ (aom_codec_alg_priv_t *)aom_calloc(1, sizeof(*priv));
+ if (priv == NULL) return AOM_CODEC_MEM_ERROR;
- ctx->priv = (vpx_codec_priv_t *)priv;
+ ctx->priv = (aom_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
priv->si.sz = sizeof(priv->si);
priv->flushed = 0;
// Only do frame parallel decode when threads > 1.
priv->frame_parallel_decode =
(ctx->config.dec && (ctx->config.dec->threads > 1) &&
- (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING))
+ (ctx->init_flags & AOM_CODEC_USE_FRAME_THREADING))
? 1
: 0;
if (ctx->config.dec) {
@@ -109,28 +109,28 @@
}
}
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
+static aom_codec_err_t decoder_destroy(aom_codec_alg_priv_t *ctx) {
if (ctx->frame_workers != NULL) {
int i;
for (i = 0; i < ctx->num_frame_workers; ++i) {
- VPxWorker *const worker = &ctx->frame_workers[i];
+ AVxWorker *const worker = &ctx->frame_workers[i];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- vpx_get_worker_interface()->end(worker);
- vp10_remove_common(&frame_worker_data->pbi->common);
+ aom_get_worker_interface()->end(worker);
+ av1_remove_common(&frame_worker_data->pbi->common);
#if CONFIG_LOOP_RESTORATION
- vp10_free_restoration_buffers(&frame_worker_data->pbi->common);
+ av1_free_restoration_buffers(&frame_worker_data->pbi->common);
#endif // CONFIG_LOOP_RESTORATION
- vp10_decoder_remove(frame_worker_data->pbi);
- vpx_free(frame_worker_data->scratch_buffer);
+ av1_decoder_remove(frame_worker_data->pbi);
+ aom_free(frame_worker_data->scratch_buffer);
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&frame_worker_data->stats_mutex);
pthread_cond_destroy(&frame_worker_data->stats_cond);
#endif
- vpx_free(frame_worker_data);
+ aom_free(frame_worker_data);
}
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
@@ -138,22 +138,22 @@
}
if (ctx->buffer_pool) {
- vp10_free_ref_frame_buffers(ctx->buffer_pool);
- vp10_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
+ av1_free_ref_frame_buffers(ctx->buffer_pool);
+ av1_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
}
- vpx_free(ctx->frame_workers);
- vpx_free(ctx->buffer_pool);
- vpx_free(ctx);
- return VPX_CODEC_OK;
+ aom_free(ctx->frame_workers);
+ aom_free(ctx->buffer_pool);
+ aom_free(ctx);
+ return AOM_CODEC_OK;
}
static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
- struct vpx_read_bit_buffer *rb) {
- vpx_color_space_t color_space;
+ struct aom_read_bit_buffer *rb) {
+ aom_color_space_t color_space;
if (profile >= PROFILE_2) rb->bit_offset += 1; // Bit-depth 10 or 12.
- color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3);
- if (color_space != VPX_CS_SRGB) {
+ color_space = (aom_color_space_t)aom_rb_read_literal(rb, 3);
+ if (color_space != AOM_CS_SRGB) {
rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range.
if (profile == PROFILE_1 || profile == PROFILE_3) {
rb->bit_offset += 2; // subsampling x/y.
@@ -170,19 +170,19 @@
return 1;
}
-static vpx_codec_err_t decoder_peek_si_internal(
- const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si,
- int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
+static aom_codec_err_t decoder_peek_si_internal(
+ const uint8_t *data, unsigned int data_sz, aom_codec_stream_info_t *si,
+ int *is_intra_only, aom_decrypt_cb decrypt_cb, void *decrypt_state) {
int intra_only_flag = 0;
uint8_t clear_buffer[9];
- if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
+ if (data + data_sz <= data) return AOM_CODEC_INVALID_PARAM;
si->is_kf = 0;
si->w = si->h = 0;
if (decrypt_cb) {
- data_sz = VPXMIN(sizeof(clear_buffer), data_sz);
+ data_sz = AOMMIN(sizeof(clear_buffer), data_sz);
decrypt_cb(decrypt_state, data, clear_buffer, data_sz);
data = clear_buffer;
}
@@ -190,91 +190,91 @@
{
int show_frame;
int error_resilient;
- struct vpx_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
- const int frame_marker = vpx_rb_read_literal(&rb, 2);
- const BITSTREAM_PROFILE profile = vp10_read_profile(&rb);
+ struct aom_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
+ const int frame_marker = aom_rb_read_literal(&rb, 2);
+ const BITSTREAM_PROFILE profile = av1_read_profile(&rb);
- if (frame_marker != VPX_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (frame_marker != AOM_FRAME_MARKER) return AOM_CODEC_UNSUP_BITSTREAM;
- if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (profile >= MAX_PROFILES) return AOM_CODEC_UNSUP_BITSTREAM;
if ((profile >= 2 && data_sz <= 1) || data_sz < 1)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ return AOM_CODEC_UNSUP_BITSTREAM;
- if (vpx_rb_read_bit(&rb)) { // show an existing frame
- vpx_rb_read_literal(&rb, 3); // Frame buffer to show.
- return VPX_CODEC_OK;
+ if (aom_rb_read_bit(&rb)) { // show an existing frame
+ aom_rb_read_literal(&rb, 3); // Frame buffer to show.
+ return AOM_CODEC_OK;
}
- if (data_sz <= 8) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (data_sz <= 8) return AOM_CODEC_UNSUP_BITSTREAM;
- si->is_kf = !vpx_rb_read_bit(&rb);
- show_frame = vpx_rb_read_bit(&rb);
- error_resilient = vpx_rb_read_bit(&rb);
+ si->is_kf = !aom_rb_read_bit(&rb);
+ show_frame = aom_rb_read_bit(&rb);
+ error_resilient = aom_rb_read_bit(&rb);
if (si->is_kf) {
- if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!av1_read_sync_code(&rb)) return AOM_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
- return VPX_CODEC_UNSUP_BITSTREAM;
- vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+ return AOM_CODEC_UNSUP_BITSTREAM;
+ av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
} else {
- intra_only_flag = show_frame ? 0 : vpx_rb_read_bit(&rb);
+ intra_only_flag = show_frame ? 0 : aom_rb_read_bit(&rb);
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
- if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!av1_read_sync_code(&rb)) return AOM_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
- return VPX_CODEC_UNSUP_BITSTREAM;
+ return AOM_CODEC_UNSUP_BITSTREAM;
}
rb.bit_offset += REF_FRAMES; // refresh_frame_flags
- vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+ av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
}
}
}
if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t decoder_peek_si(const uint8_t *data,
+static aom_codec_err_t decoder_peek_si(const uint8_t *data,
unsigned int data_sz,
- vpx_codec_stream_info_t *si) {
+ aom_codec_stream_info_t *si) {
return decoder_peek_si_internal(data, data_sz, si, NULL, NULL, NULL);
}
-static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
- vpx_codec_stream_info_t *si) {
- const size_t sz = (si->sz >= sizeof(vp10_stream_info_t))
- ? sizeof(vp10_stream_info_t)
- : sizeof(vpx_codec_stream_info_t);
+static aom_codec_err_t decoder_get_si(aom_codec_alg_priv_t *ctx,
+ aom_codec_stream_info_t *si) {
+ const size_t sz = (si->sz >= sizeof(av1_stream_info_t))
+ ? sizeof(av1_stream_info_t)
+ : sizeof(aom_codec_stream_info_t);
memcpy(si, &ctx->si, sz);
si->sz = (unsigned int)sz;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static void set_error_detail(vpx_codec_alg_priv_t *ctx,
+static void set_error_detail(aom_codec_alg_priv_t *ctx,
const char *const error) {
ctx->base.err_detail = error;
}
-static vpx_codec_err_t update_error_state(
- vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
+static aom_codec_err_t update_error_state(
+ aom_codec_alg_priv_t *ctx, const struct aom_internal_error_info *error) {
if (error->error_code)
set_error_detail(ctx, error->has_detail ? error->detail : NULL);
return error->error_code;
}
-static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) {
+static void init_buffer_callbacks(aom_codec_alg_priv_t *ctx) {
int i;
for (i = 0; i < ctx->num_frame_workers; ++i) {
- VPxWorker *const worker = &ctx->frame_workers[i];
+ AVxWorker *const worker = &ctx->frame_workers[i];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
- VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ AV1_COMMON *const cm = &frame_worker_data->pbi->common;
BufferPool *const pool = cm->buffer_pool;
cm->new_fb_idx = INVALID_IDX;
@@ -286,11 +286,11 @@
pool->release_fb_cb = ctx->release_ext_fb_cb;
pool->cb_priv = ctx->ext_priv;
} else {
- pool->get_fb_cb = vp10_get_frame_buffer;
- pool->release_fb_cb = vp10_release_frame_buffer;
+ pool->get_fb_cb = av1_get_frame_buffer;
+ pool->release_fb_cb = av1_release_frame_buffer;
- if (vp10_alloc_internal_frame_buffers(&pool->int_frame_buffers))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ if (av1_alloc_internal_frame_buffers(&pool->int_frame_buffers))
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to initialize internal frame buffers");
pool->cb_priv = &pool->int_frame_buffers;
@@ -298,8 +298,8 @@
}
}
-static void set_default_ppflags(vp8_postproc_cfg_t *cfg) {
- cfg->post_proc_flag = VP8_DEBLOCK | VP8_DEMACROBLOCK;
+static void set_default_ppflags(aom_postproc_cfg_t *cfg) {
+ cfg->post_proc_flag = AOM_DEBLOCK | AOM_DEMACROBLOCK;
cfg->deblocking_level = 4;
cfg->noise_level = 0;
}
@@ -309,7 +309,7 @@
const uint8_t *data = frame_worker_data->data;
(void)arg2;
- frame_worker_data->result = vp10_receive_compressed_data(
+ frame_worker_data->result = av1_receive_compressed_data(
frame_worker_data->pbi, frame_worker_data->data_size, &data);
frame_worker_data->data_end = data;
@@ -318,17 +318,17 @@
// the compressed data.
if (frame_worker_data->result != 0 ||
frame_worker_data->data + frame_worker_data->data_size - 1 > data) {
- VPxWorker *const worker = frame_worker_data->pbi->frame_worker_owner;
+ AVxWorker *const worker = frame_worker_data->pbi->frame_worker_owner;
BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
// Signal all the other threads that are waiting for this frame.
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
frame_worker_data->frame_context_ready = 1;
lock_buffer_pool(pool);
frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
unlock_buffer_pool(pool);
frame_worker_data->pbi->need_resync = 1;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
return 0;
}
} else if (frame_worker_data->result != 0) {
@@ -339,9 +339,9 @@
return !frame_worker_data->result;
}
-static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
+static aom_codec_err_t init_decoder(aom_codec_alg_priv_t *ctx) {
int i;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
ctx->last_show_frame = -1;
ctx->next_submit_worker_id = 0;
@@ -358,37 +358,37 @@
ctx->available_threads = ctx->num_frame_workers;
ctx->flushed = 0;
- ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
+ ctx->buffer_pool = (BufferPool *)aom_calloc(1, sizeof(BufferPool));
+ if (ctx->buffer_pool == NULL) return AOM_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
set_error_detail(ctx, "Failed to allocate buffer pool mutex");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
#endif
- ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers *
+ ctx->frame_workers = (AVxWorker *)aom_malloc(ctx->num_frame_workers *
sizeof(*ctx->frame_workers));
if (ctx->frame_workers == NULL) {
set_error_detail(ctx, "Failed to allocate frame_workers");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
for (i = 0; i < ctx->num_frame_workers; ++i) {
- VPxWorker *const worker = &ctx->frame_workers[i];
+ AVxWorker *const worker = &ctx->frame_workers[i];
FrameWorkerData *frame_worker_data = NULL;
winterface->init(worker);
- worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData));
+ worker->data1 = aom_memalign(32, sizeof(FrameWorkerData));
if (worker->data1 == NULL) {
set_error_detail(ctx, "Failed to allocate frame_worker_data");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
frame_worker_data = (FrameWorkerData *)worker->data1;
- frame_worker_data->pbi = vp10_decoder_create(ctx->buffer_pool);
+ frame_worker_data->pbi = av1_decoder_create(ctx->buffer_pool);
if (frame_worker_data->pbi == NULL) {
set_error_detail(ctx, "Failed to allocate frame_worker_data");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
frame_worker_data->pbi->frame_worker_owner = worker;
frame_worker_data->worker_id = i;
@@ -399,12 +399,12 @@
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&frame_worker_data->stats_mutex, NULL)) {
set_error_detail(ctx, "Failed to allocate frame_worker_data mutex");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
if (pthread_cond_init(&frame_worker_data->stats_cond, NULL)) {
set_error_detail(ctx, "Failed to allocate frame_worker_data cond");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
#endif
// If decoding in serial mode, FrameWorker thread could create tile worker
@@ -415,35 +415,35 @@
frame_worker_data->pbi->inv_tile_order = ctx->invert_tile_order;
frame_worker_data->pbi->common.frame_parallel_decode =
ctx->frame_parallel_decode;
- worker->hook = (VPxWorkerHook)frame_worker_hook;
+ worker->hook = (AVxWorkerHook)frame_worker_hook;
if (!winterface->reset(worker)) {
set_error_detail(ctx, "Frame Worker thread creation failed");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
}
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
- if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
+ if (!ctx->postproc_cfg_set && (ctx->base.init_flags & AOM_CODEC_USE_POSTPROC))
set_default_ppflags(&ctx->postproc_cfg);
init_buffer_callbacks(ctx);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static INLINE void check_resync(vpx_codec_alg_priv_t *const ctx,
- const VP10Decoder *const pbi) {
+static INLINE void check_resync(aom_codec_alg_priv_t *const ctx,
+ const AV1Decoder *const pbi) {
// Clear resync flag if worker got a key frame or intra only frame.
if (ctx->need_resync == 1 && pbi->need_resync == 0 &&
(pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME))
ctx->need_resync = 0;
}
-static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t decode_one(aom_codec_alg_priv_t *ctx,
const uint8_t **data, unsigned int data_sz,
void *user_priv, int64_t deadline) {
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
(void)deadline;
// Determine the stream parameters. Note that we rely on peek_si to
@@ -451,16 +451,16 @@
// of the heap.
if (!ctx->si.h) {
int is_intra_only = 0;
- const vpx_codec_err_t res =
+ const aom_codec_err_t res =
decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only,
ctx->decrypt_cb, ctx->decrypt_state);
- if (res != VPX_CODEC_OK) return res;
+ if (res != AOM_CODEC_OK) return res;
- if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR;
+ if (!ctx->si.is_kf && !is_intra_only) return AOM_CODEC_ERROR;
}
if (!ctx->frame_parallel_decode) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->data = *data;
frame_worker_data->data_size = data_sz;
@@ -488,11 +488,11 @@
check_resync(ctx, frame_worker_data->pbi);
} else {
- VPxWorker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id];
+ AVxWorker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
// Copy context from last worker thread to next worker thread.
if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
- vp10_frameworker_copy_context(
+ av1_frameworker_copy_context(
&ctx->frame_workers[ctx->next_submit_worker_id],
&ctx->frame_workers[ctx->last_submit_worker_id]);
@@ -503,10 +503,10 @@
// avoid too much deallocation and reallocation.
if (frame_worker_data->scratch_buffer_size < data_sz) {
frame_worker_data->scratch_buffer =
- (uint8_t *)vpx_realloc(frame_worker_data->scratch_buffer, data_sz);
+ (uint8_t *)aom_realloc(frame_worker_data->scratch_buffer, data_sz);
if (frame_worker_data->scratch_buffer == NULL) {
set_error_detail(ctx, "Failed to reallocate scratch buffer");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
frame_worker_data->scratch_buffer_size = data_sz;
}
@@ -530,13 +530,13 @@
winterface->launch(worker);
}
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
+static void wait_worker_and_cache_frame(aom_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
- VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
+ AVxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
(ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
@@ -547,8 +547,8 @@
check_resync(ctx, frame_worker_data->pbi);
- if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
- VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+ AV1_COMMON *const cm = &frame_worker_data->pbi->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
@@ -560,18 +560,18 @@
}
}
-static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
const uint8_t *data_start = data;
const uint8_t *const data_end = data + data_sz;
- vpx_codec_err_t res;
+ aom_codec_err_t res;
uint32_t frame_sizes[8];
int frame_count;
if (data == NULL && data_sz == 0) {
ctx->flushed = 1;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
// Reset flushed when receiving a valid frame.
@@ -579,13 +579,13 @@
// Initialize the decoder workers on the first frame.
if (ctx->frame_workers == NULL) {
- const vpx_codec_err_t res = init_decoder(ctx);
- if (res != VPX_CODEC_OK) return res;
+ const aom_codec_err_t res = init_decoder(ctx);
+ if (res != AOM_CODEC_OK) return res;
}
- res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
- ctx->decrypt_cb, ctx->decrypt_state);
- if (res != VPX_CODEC_OK) return res;
+ res = av1_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
+ ctx->decrypt_cb, ctx->decrypt_state);
+ if (res != AOM_CODEC_OK) return res;
if (ctx->frame_parallel_decode) {
// Decode in frame parallel mode. When decoding in this mode, the frame
@@ -601,7 +601,7 @@
if (data_start < data ||
frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
- return VPX_CODEC_CORRUPT_FRAME;
+ return AOM_CODEC_CORRUPT_FRAME;
}
if (ctx->available_threads == 0) {
@@ -612,13 +612,13 @@
} else {
// TODO(hkuang): Add unit test to test this path.
set_error_detail(ctx, "Frame output cache is full.");
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
res =
decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
- if (res != VPX_CODEC_OK) return res;
+ if (res != AOM_CODEC_OK) return res;
data_start += frame_size;
}
} else {
@@ -630,12 +630,12 @@
} else {
// TODO(hkuang): Add unit test to test this path.
set_error_detail(ctx, "Frame output cache is full.");
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
res = decode_one(ctx, &data, data_sz, user_priv, deadline);
- if (res != VPX_CODEC_OK) return res;
+ if (res != AOM_CODEC_OK) return res;
}
} else {
// Decode in serial mode.
@@ -645,25 +645,25 @@
for (i = 0; i < frame_count; ++i) {
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
- vpx_codec_err_t res;
+ aom_codec_err_t res;
if (data_start < data ||
frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
- return VPX_CODEC_CORRUPT_FRAME;
+ return AOM_CODEC_CORRUPT_FRAME;
}
res =
decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
- if (res != VPX_CODEC_OK) return res;
+ if (res != AOM_CODEC_OK) return res;
data_start += frame_size;
}
} else {
while (data_start < data_end) {
const uint32_t frame_size = (uint32_t)(data_end - data_start);
- const vpx_codec_err_t res =
+ const aom_codec_err_t res =
decode_one(ctx, &data_start, frame_size, user_priv, deadline);
- if (res != VPX_CODEC_OK) return res;
+ if (res != AOM_CODEC_OK) return res;
// Account for suboptimal termination by the encoder.
while (data_start < data_end) {
@@ -679,7 +679,7 @@
return res;
}
-static void release_last_output_frame(vpx_codec_alg_priv_t *ctx) {
+static void release_last_output_frame(aom_codec_alg_priv_t *ctx) {
RefCntBuffer *const frame_bufs = ctx->buffer_pool->frame_bufs;
// Decrease reference count of last output frame in frame parallel mode.
if (ctx->frame_parallel_decode && ctx->last_show_frame >= 0) {
@@ -690,9 +690,9 @@
}
}
-static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
- vpx_codec_iter_t *iter) {
- vpx_image_t *img = NULL;
+static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx,
+ aom_codec_iter_t *iter) {
+ aom_image_t *img = NULL;
// Only return a frame when all the CPUs are busy or the
// application flushed the decoder in frame parallel decode.
@@ -717,8 +717,8 @@
if (*iter == NULL && ctx->frame_workers != NULL) {
do {
YV12_BUFFER_CONFIG sd;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
- VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
+ AVxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
@@ -731,8 +731,8 @@
frame_worker_data->received_frame = 0;
check_resync(ctx, frame_worker_data->pbi);
}
- if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
- VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+ AV1_COMMON *const cm = &frame_worker_data->pbi->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
release_last_output_frame(ctx);
ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
@@ -742,7 +742,7 @@
#if CONFIG_EXT_TILE
if (frame_worker_data->pbi->dec_tile_row >= 0) {
const int tile_row =
- VPXMIN(frame_worker_data->pbi->dec_tile_row, cm->tile_rows - 1);
+ AOMMIN(frame_worker_data->pbi->dec_tile_row, cm->tile_rows - 1);
const int mi_row = tile_row * cm->tile_height;
const int ssy = ctx->img.y_chroma_shift;
int plane;
@@ -752,12 +752,12 @@
mi_row * (MI_SIZE >> ssy) * ctx->img.stride[plane];
}
ctx->img.d_h =
- VPXMIN(cm->tile_height, cm->mi_rows - mi_row) * MI_SIZE;
+ AOMMIN(cm->tile_height, cm->mi_rows - mi_row) * MI_SIZE;
}
if (frame_worker_data->pbi->dec_tile_col >= 0) {
const int tile_col =
- VPXMIN(frame_worker_data->pbi->dec_tile_col, cm->tile_cols - 1);
+ AOMMIN(frame_worker_data->pbi->dec_tile_col, cm->tile_cols - 1);
const int mi_col = tile_col * cm->tile_width;
const int ssx = ctx->img.x_chroma_shift;
int plane;
@@ -766,7 +766,7 @@
ctx->img.planes[plane] += mi_col * (MI_SIZE >> ssx);
}
ctx->img.d_w =
- VPXMIN(cm->tile_width, cm->mi_cols - mi_col) * MI_SIZE;
+ AOMMIN(cm->tile_width, cm->mi_cols - mi_col) * MI_SIZE;
}
#endif // CONFIG_EXT_TILE
@@ -786,271 +786,271 @@
return NULL;
}
-static vpx_codec_err_t decoder_set_fb_fn(
- vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
- vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
+static aom_codec_err_t decoder_set_fb_fn(
+ aom_codec_alg_priv_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get,
+ aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
} else if (ctx->frame_workers == NULL) {
// If the decoder has already been initialized, do not accept changes to
// the frame buffer functions.
ctx->get_ext_fb_cb = cb_get;
ctx->release_ext_fb_cb = cb_release;
ctx->ext_priv = cb_priv;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
-static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_ref_frame_t *const data = va_arg(args, vpx_ref_frame_t *);
+ aom_ref_frame_t *const data = va_arg(args, aom_ref_frame_t *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (data) {
- vpx_ref_frame_t *const frame = (vpx_ref_frame_t *)data;
+ aom_ref_frame_t *const frame = (aom_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
- return vp10_set_reference_dec(&frame_worker_data->pbi->common,
- ref_frame_to_vp10_reframe(frame->frame_type),
- &sd);
+ return av1_set_reference_dec(&frame_worker_data->pbi->common,
+ ref_frame_to_av1_reframe(frame->frame_type),
+ &sd);
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+ aom_ref_frame_t *data = va_arg(args, aom_ref_frame_t *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (data) {
- vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ aom_ref_frame_t *frame = (aom_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
- return vp10_copy_reference_dec(frame_worker_data->pbi,
- (VPX_REFFRAME)frame->frame_type, &sd);
+ return av1_copy_reference_dec(frame_worker_data->pbi,
+ (AOM_REFFRAME)frame->frame_type, &sd);
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_reference(aom_codec_alg_priv_t *ctx,
va_list args) {
- vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+ av1_ref_frame_t *data = va_arg(args, av1_ref_frame_t *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (data) {
YV12_BUFFER_CONFIG *fb;
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
- if (fb == NULL) return VPX_CODEC_ERROR;
+ if (fb == NULL) return AOM_CODEC_ERROR;
yuvconfig2image(&data->img, fb, NULL);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_get_new_frame_image(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_new_frame_image(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_image_t *new_img = va_arg(args, vpx_image_t *);
+ aom_image_t *new_img = va_arg(args, aom_image_t *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (new_img) {
YV12_BUFFER_CONFIG new_frame;
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
- if (vp10_get_frame_to_show(frame_worker_data->pbi, &new_frame) == 0) {
+ if (av1_get_frame_to_show(frame_worker_data->pbi, &new_frame) == 0) {
yuvconfig2image(new_img, &new_frame, NULL);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
} else {
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
}
-static vpx_codec_err_t ctrl_set_postproc(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_postproc(aom_codec_alg_priv_t *ctx,
va_list args) {
(void)ctx;
(void)args;
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
-static vpx_codec_err_t ctrl_set_dbg_options(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_dbg_options(aom_codec_alg_priv_t *ctx,
va_list args) {
(void)ctx;
(void)args;
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
-static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_last_ref_updates(aom_codec_alg_priv_t *ctx,
va_list args) {
int *const update_info = va_arg(args, int *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (update_info) {
if (ctx->frame_workers) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
*update_info = frame_worker_data->pbi->refresh_frame_flags;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t ctrl_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_frame_corrupted(aom_codec_alg_priv_t *ctx,
va_list args) {
int *corrupted = va_arg(args, int *);
if (corrupted) {
if (ctx->frame_workers) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
RefCntBuffer *const frame_bufs =
frame_worker_data->pbi->common.buffer_pool->frame_bufs;
if (frame_worker_data->pbi->common.frame_to_show == NULL)
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
if (ctx->last_show_frame >= 0)
*corrupted = frame_bufs[ctx->last_show_frame].buf.corrupted;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t ctrl_get_frame_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_frame_size(aom_codec_alg_priv_t *ctx,
va_list args) {
int *const frame_size = va_arg(args, int *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (frame_size) {
if (ctx->frame_workers) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
frame_size[0] = cm->width;
frame_size[1] = cm->height;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t ctrl_get_render_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_render_size(aom_codec_alg_priv_t *ctx,
va_list args) {
int *const render_size = va_arg(args, int *);
// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
- return VPX_CODEC_INCAPABLE;
+ return AOM_CODEC_INCAPABLE;
}
if (render_size) {
if (ctx->frame_workers) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
render_size[0] = cm->render_width;
render_size[1] = cm->render_height;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t ctrl_get_bit_depth(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_bit_depth(aom_codec_alg_priv_t *ctx,
va_list args) {
unsigned int *const bit_depth = va_arg(args, unsigned int *);
- VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+ AVxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
if (bit_depth) {
if (worker) {
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
*bit_depth = cm->bit_depth;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
} else {
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
}
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
}
-static vpx_codec_err_t ctrl_set_invert_tile_order(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_invert_tile_order(aom_codec_alg_priv_t *ctx,
va_list args) {
ctx->invert_tile_order = va_arg(args, int);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_decryptor(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_decryptor(aom_codec_alg_priv_t *ctx,
va_list args) {
- vpx_decrypt_init *init = va_arg(args, vpx_decrypt_init *);
+ aom_decrypt_init *init = va_arg(args, aom_decrypt_init *);
ctx->decrypt_cb = init ? init->decrypt_cb : NULL;
ctx->decrypt_state = init ? init->decrypt_state : NULL;
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_byte_alignment(aom_codec_alg_priv_t *ctx,
va_list args) {
const int legacy_byte_alignment = 0;
const int min_byte_alignment = 32;
@@ -1061,67 +1061,67 @@
(byte_alignment < min_byte_alignment ||
byte_alignment > max_byte_alignment ||
(byte_alignment & (byte_alignment - 1)) != 0))
- return VPX_CODEC_INVALID_PARAM;
+ return AOM_CODEC_INVALID_PARAM;
ctx->byte_alignment = byte_alignment;
if (ctx->frame_workers) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->pbi->common.byte_alignment = byte_alignment;
}
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_skip_loop_filter(aom_codec_alg_priv_t *ctx,
va_list args) {
ctx->skip_loop_filter = va_arg(args, int);
if (ctx->frame_workers) {
- VPxWorker *const worker = ctx->frame_workers;
+ AVxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->pbi->common.skip_loop_filter = ctx->skip_loop_filter;
}
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_decode_tile_row(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_decode_tile_row(aom_codec_alg_priv_t *ctx,
va_list args) {
ctx->decode_tile_row = va_arg(args, int);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_err_t ctrl_set_decode_tile_col(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_decode_tile_col(aom_codec_alg_priv_t *ctx,
va_list args) {
ctx->decode_tile_col = va_arg(args, int);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
- { VP8_COPY_REFERENCE, ctrl_copy_reference },
+static aom_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
+ { AOM_COPY_REFERENCE, ctrl_copy_reference },
// Setters
- { VP8_SET_REFERENCE, ctrl_set_reference },
- { VP8_SET_POSTPROC, ctrl_set_postproc },
- { VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
- { VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
- { VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
- { VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
- { VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
- { VPXD_SET_DECRYPTOR, ctrl_set_decryptor },
- { VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
- { VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
- { VP10_SET_DECODE_TILE_ROW, ctrl_set_decode_tile_row },
- { VP10_SET_DECODE_TILE_COL, ctrl_set_decode_tile_col },
+ { AOM_SET_REFERENCE, ctrl_set_reference },
+ { AOM_SET_POSTPROC, ctrl_set_postproc },
+ { AOM_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
+ { AOM_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
+ { AOM_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
+ { AOM_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
+ { AV1_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
+ { AOMD_SET_DECRYPTOR, ctrl_set_decryptor },
+ { AV1_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
+ { AV1_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
+ { AV1_SET_DECODE_TILE_ROW, ctrl_set_decode_tile_row },
+ { AV1_SET_DECODE_TILE_COL, ctrl_set_decode_tile_col },
// Getters
- { VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
- { VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
- { VP9_GET_REFERENCE, ctrl_get_reference },
- { VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size },
- { VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth },
- { VP9D_GET_FRAME_SIZE, ctrl_get_frame_size },
- { VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
+ { AOMD_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
+ { AOMD_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
+ { AV1_GET_REFERENCE, ctrl_get_reference },
+ { AV1D_GET_DISPLAY_SIZE, ctrl_get_render_size },
+ { AV1D_GET_BIT_DEPTH, ctrl_get_bit_depth },
+ { AV1D_GET_FRAME_SIZE, ctrl_get_frame_size },
+ { AV1_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
{ -1, NULL },
};
@@ -1129,31 +1129,31 @@
#ifndef VERSION_STRING
#define VERSION_STRING
#endif
-CODEC_INTERFACE(vpx_codec_vp10_dx) = {
- "WebM Project VP10 Decoder" VERSION_STRING,
- VPX_CODEC_INTERNAL_ABI_VERSION,
- VPX_CODEC_CAP_DECODER |
- VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // vpx_codec_caps_t
- decoder_init, // vpx_codec_init_fn_t
- decoder_destroy, // vpx_codec_destroy_fn_t
- decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
+CODEC_INTERFACE(aom_codec_av1_dx) = {
+ "AOMedia Project AV1 Decoder" VERSION_STRING,
+ AOM_CODEC_INTERNAL_ABI_VERSION,
+ AOM_CODEC_CAP_DECODER |
+ AOM_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // aom_codec_caps_t
+ decoder_init, // aom_codec_init_fn_t
+ decoder_destroy, // aom_codec_destroy_fn_t
+ decoder_ctrl_maps, // aom_codec_ctrl_fn_map_t
{
// NOLINT
- decoder_peek_si, // vpx_codec_peek_si_fn_t
- decoder_get_si, // vpx_codec_get_si_fn_t
- decoder_decode, // vpx_codec_decode_fn_t
- decoder_get_frame, // vpx_codec_frame_get_fn_t
- decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
+ decoder_peek_si, // aom_codec_peek_si_fn_t
+ decoder_get_si, // aom_codec_get_si_fn_t
+ decoder_decode, // aom_codec_decode_fn_t
+ decoder_get_frame, // aom_codec_frame_get_fn_t
+ decoder_set_fb_fn, // aom_codec_set_fb_fn_t
},
{
// NOLINT
0,
- NULL, // vpx_codec_enc_cfg_map_t
- NULL, // vpx_codec_encode_fn_t
- NULL, // vpx_codec_get_cx_data_fn_t
- NULL, // vpx_codec_enc_config_set_fn_t
- NULL, // vpx_codec_get_global_headers_fn_t
- NULL, // vpx_codec_get_preview_frame_fn_t
- NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
+ NULL, // aom_codec_enc_cfg_map_t
+ NULL, // aom_codec_encode_fn_t
+ NULL, // aom_codec_get_cx_data_fn_t
+ NULL, // aom_codec_enc_config_set_fn_t
+ NULL, // aom_codec_get_global_headers_fn_t
+ NULL, // aom_codec_get_preview_frame_fn_t
+ NULL // aom_codec_enc_mr_get_mem_loc_fn_t
}
};
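
Decoder-side usage sketch (illustrative only): the renamed entry points above
(decoder_decode, decoder_get_frame) are reached through aom_codec_decode() and
the aom_codec_get_frame() iterator after initializing with
aom_codec_dec_init(..., aom_codec_av1_dx(), ...). A minimal sketch, assuming
buf/buf_sz hold one compressed frame:

#include "aom/aom_decoder.h"
#include "aom/aomdx.h"

// Sketch only: decode one compressed frame and walk the output images.
static aom_codec_err_t decode_and_drain(aom_codec_ctx_t *codec,
                                        const uint8_t *buf, size_t buf_sz) {
  aom_codec_iter_t iter = NULL;
  aom_image_t *img;
  const aom_codec_err_t res =
      aom_codec_decode(codec, buf, (unsigned int)buf_sz, NULL, 0);
  if (res != AOM_CODEC_OK) return res;
  // Each call here lands in decoder_get_frame() via the iterator API.
  while ((img = aom_codec_get_frame(codec, &iter)) != NULL) {
    // Consume img->planes[] / img->stride[] here.
  }
  return AOM_CODEC_OK;
}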
diff --git a/av1/av1_iface_common.h b/av1/av1_iface_common.h
new file mode 100644
index 0000000..3ba029e
--- /dev/null
+++ b/av1/av1_iface_common.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef AV1_AV1_IFACE_COMMON_H_
+#define AV1_AV1_IFACE_COMMON_H_
+
+#include "aom_ports/mem.h"
+
+static void yuvconfig2image(aom_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+ void *user_priv) {
+ /** aom_img_wrap() doesn't allow specifying independent strides for
+ * the Y, U, and V planes, nor other alignment adjustments that
+ * might be representable by a YV12_BUFFER_CONFIG, so we just
+ * initialize all the fields. */
+ int bps;
+ if (!yv12->subsampling_y) {
+ if (!yv12->subsampling_x) {
+ img->fmt = AOM_IMG_FMT_I444;
+ bps = 24;
+ } else {
+ img->fmt = AOM_IMG_FMT_I422;
+ bps = 16;
+ }
+ } else {
+ if (!yv12->subsampling_x) {
+ img->fmt = AOM_IMG_FMT_I440;
+ bps = 16;
+ } else {
+ img->fmt = AOM_IMG_FMT_I420;
+ bps = 12;
+ }
+ }
+ img->cs = yv12->color_space;
+ img->range = yv12->color_range;
+ img->bit_depth = 8;
+ img->w = yv12->y_stride;
+ img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * AOM_ENC_BORDER_IN_PIXELS, 3);
+ img->d_w = yv12->y_crop_width;
+ img->d_h = yv12->y_crop_height;
+ img->r_w = yv12->render_width;
+ img->r_h = yv12->render_height;
+ img->x_chroma_shift = yv12->subsampling_x;
+ img->y_chroma_shift = yv12->subsampling_y;
+ img->planes[AOM_PLANE_Y] = yv12->y_buffer;
+ img->planes[AOM_PLANE_U] = yv12->u_buffer;
+ img->planes[AOM_PLANE_V] = yv12->v_buffer;
+ img->planes[AOM_PLANE_ALPHA] = NULL;
+ img->stride[AOM_PLANE_Y] = yv12->y_stride;
+ img->stride[AOM_PLANE_U] = yv12->uv_stride;
+ img->stride[AOM_PLANE_V] = yv12->uv_stride;
+ img->stride[AOM_PLANE_ALPHA] = yv12->y_stride;
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) {
+ // aom_image_t uses byte strides and a pointer to the first byte
+ // of the image.
+ img->fmt = (aom_img_fmt_t)(img->fmt | AOM_IMG_FMT_HIGHBITDEPTH);
+ img->bit_depth = yv12->bit_depth;
+ img->planes[AOM_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
+ img->planes[AOM_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
+ img->planes[AOM_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
+ img->planes[AOM_PLANE_ALPHA] = NULL;
+ img->stride[AOM_PLANE_Y] = 2 * yv12->y_stride;
+ img->stride[AOM_PLANE_U] = 2 * yv12->uv_stride;
+ img->stride[AOM_PLANE_V] = 2 * yv12->uv_stride;
+ img->stride[AOM_PLANE_ALPHA] = 2 * yv12->y_stride;
+ }
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ img->bps = bps;
+ img->user_priv = user_priv;
+ img->img_data = yv12->buffer_alloc;
+ img->img_data_owner = 0;
+ img->self_allocd = 0;
+}
+
+static aom_codec_err_t image2yuvconfig(const aom_image_t *img,
+ YV12_BUFFER_CONFIG *yv12) {
+ yv12->y_buffer = img->planes[AOM_PLANE_Y];
+ yv12->u_buffer = img->planes[AOM_PLANE_U];
+ yv12->v_buffer = img->planes[AOM_PLANE_V];
+
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
+ yv12->render_width = img->r_w;
+ yv12->render_height = img->r_h;
+ yv12->y_width = img->d_w;
+ yv12->y_height = img->d_h;
+
+ yv12->uv_width =
+ img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
+ yv12->uv_height =
+ img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
+ yv12->uv_crop_width = yv12->uv_width;
+ yv12->uv_crop_height = yv12->uv_height;
+
+ yv12->y_stride = img->stride[AOM_PLANE_Y];
+ yv12->uv_stride = img->stride[AOM_PLANE_U];
+ yv12->color_space = img->cs;
+ yv12->color_range = img->range;
+
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
+ // In aom_image_t
+ // planes point to uint8 address of start of data
+ // stride counts uint8s to reach next row
+ // In YV12_BUFFER_CONFIG
+ // y_buffer, u_buffer, v_buffer point to uint16 address of data
+ // stride and border count in uint16s
+ // This means that all the address calculations in the main body of code
+ // should work correctly.
+ // However, before we do any pixel operations we need to cast the address
+ // to a uint16 pointer and double its value.
+ yv12->y_buffer = CONVERT_TO_BYTEPTR(yv12->y_buffer);
+ yv12->u_buffer = CONVERT_TO_BYTEPTR(yv12->u_buffer);
+ yv12->v_buffer = CONVERT_TO_BYTEPTR(yv12->v_buffer);
+ yv12->y_stride >>= 1;
+ yv12->uv_stride >>= 1;
+ yv12->flags = YV12_FLAG_HIGHBITDEPTH;
+ } else {
+ yv12->flags = 0;
+ }
+ yv12->border = (yv12->y_stride - img->w) / 2;
+#else
+ yv12->border = (img->stride[AOM_PLANE_Y] - img->w) / 2;
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ yv12->subsampling_x = img->x_chroma_shift;
+ yv12->subsampling_y = img->y_chroma_shift;
+ return AOM_CODEC_OK;
+}
+
+static AOM_REFFRAME ref_frame_to_av1_reframe(aom_ref_frame_type_t frame) {
+ switch (frame) {
+ case AOM_LAST_FRAME: return AOM_LAST_FLAG;
+ case AOM_GOLD_FRAME: return AOM_GOLD_FLAG;
+ case AOM_ALTR_FRAME: return AOM_ALT_FLAG;
+ }
+ assert(0 && "Invalid Reference Frame");
+ return AOM_LAST_FLAG;
+}
+#endif // AV1_AV1_IFACE_COMMON_H_
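
Addressing sketch (illustrative only): the CONFIG_AOM_HIGHBITDEPTH comments
above describe two conventions; on the aom_image_t side, strides are in bytes
while the plane data holds 16-bit samples. A small sketch of reading one luma
sample under those assumptions; read_y_sample is a hypothetical helper, not
part of this change:

#include <stdint.h>
#include "aom/aom_image.h"

// Sketch only: locate a row with the byte-based stride, then reinterpret
// it as 16-bit samples, per the convention documented in image2yuvconfig().
static uint16_t read_y_sample(const aom_image_t *img, int r, int c) {
  const uint8_t *row =
      img->planes[AOM_PLANE_Y] + (size_t)r * img->stride[AOM_PLANE_Y];
  return ((const uint16_t *)row)[c];  // c indexes 16-bit samples
}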
diff --git a/av1/av1cx.mk b/av1/av1cx.mk
new file mode 100644
index 0000000..463c5f7
--- /dev/null
+++ b/av1/av1cx.mk
@@ -0,0 +1,147 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+AV1_CX_EXPORTS += exports_enc
+
+AV1_CX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_CX_SRCS-no += $(AV1_COMMON_SRCS-no)
+AV1_CX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_CX_SRCS_REMOVE-no += $(AV1_COMMON_SRCS_REMOVE-no)
+
+AV1_CX_SRCS-yes += av1_cx_iface.c
+
+AV1_CX_SRCS-yes += encoder/bitstream.c
+AV1_CX_SRCS-yes += encoder/bitwriter.h
+AV1_CX_SRCS-yes += encoder/context_tree.c
+AV1_CX_SRCS-yes += encoder/context_tree.h
+AV1_CX_SRCS-yes += encoder/variance_tree.c
+AV1_CX_SRCS-yes += encoder/variance_tree.h
+AV1_CX_SRCS-yes += encoder/cost.h
+AV1_CX_SRCS-yes += encoder/cost.c
+AV1_CX_SRCS-yes += encoder/dct.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.h
+AV1_CX_SRCS-yes += encoder/encodeframe.c
+AV1_CX_SRCS-yes += encoder/encodeframe.h
+AV1_CX_SRCS-yes += encoder/encodemb.c
+AV1_CX_SRCS-yes += encoder/encodemv.c
+AV1_CX_SRCS-yes += encoder/ethread.h
+AV1_CX_SRCS-yes += encoder/ethread.c
+AV1_CX_SRCS-yes += encoder/extend.c
+AV1_CX_SRCS-yes += encoder/firstpass.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.h
+AV1_CX_SRCS-yes += encoder/block.h
+AV1_CX_SRCS-yes += encoder/bitstream.h
+AV1_CX_SRCS-yes += encoder/encodemb.h
+AV1_CX_SRCS-yes += encoder/encodemv.h
+AV1_CX_SRCS-yes += encoder/extend.h
+AV1_CX_SRCS-yes += encoder/firstpass.h
+AV1_CX_SRCS-yes += encoder/lookahead.c
+AV1_CX_SRCS-yes += encoder/lookahead.h
+AV1_CX_SRCS-yes += encoder/mcomp.h
+AV1_CX_SRCS-yes += encoder/encoder.h
+AV1_CX_SRCS-yes += encoder/quantize.h
+AV1_CX_SRCS-yes += encoder/ratectrl.h
+AV1_CX_SRCS-yes += encoder/rd.h
+AV1_CX_SRCS-yes += encoder/rdopt.h
+AV1_CX_SRCS-yes += encoder/tokenize.h
+AV1_CX_SRCS-yes += encoder/treewriter.h
+AV1_CX_SRCS-yes += encoder/mcomp.c
+AV1_CX_SRCS-yes += encoder/encoder.c
+AV1_CX_SRCS-yes += encoder/palette.h
+AV1_CX_SRCS-yes += encoder/palette.c
+AV1_CX_SRCS-yes += encoder/picklpf.c
+AV1_CX_SRCS-yes += encoder/picklpf.h
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.c
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.h
+AV1_CX_SRCS-yes += encoder/quantize.c
+AV1_CX_SRCS-yes += encoder/ratectrl.c
+AV1_CX_SRCS-yes += encoder/rd.c
+AV1_CX_SRCS-yes += encoder/rdopt.c
+AV1_CX_SRCS-yes += encoder/segmentation.c
+AV1_CX_SRCS-yes += encoder/segmentation.h
+AV1_CX_SRCS-yes += encoder/speed_features.c
+AV1_CX_SRCS-yes += encoder/speed_features.h
+AV1_CX_SRCS-yes += encoder/subexp.c
+AV1_CX_SRCS-yes += encoder/subexp.h
+AV1_CX_SRCS-yes += encoder/resize.c
+AV1_CX_SRCS-yes += encoder/resize.h
+AV1_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.h
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.c
+
+AV1_CX_SRCS-yes += encoder/tokenize.c
+AV1_CX_SRCS-yes += encoder/treewriter.c
+AV1_CX_SRCS-yes += encoder/aq_variance.c
+AV1_CX_SRCS-yes += encoder/aq_variance.h
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
+AV1_CX_SRCS-yes += encoder/aq_complexity.c
+AV1_CX_SRCS-yes += encoder/aq_complexity.h
+AV1_CX_SRCS-yes += encoder/temporal_filter.c
+AV1_CX_SRCS-yes += encoder/temporal_filter.h
+AV1_CX_SRCS-yes += encoder/mbgraph.c
+AV1_CX_SRCS-yes += encoder/mbgraph.h
+ifeq ($(CONFIG_DERING),yes)
+AV1_CX_SRCS-yes += encoder/pickdering.c
+endif
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
+endif
+
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
+
+ifeq ($(ARCH_X86_64),yes)
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
+endif
+
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/highbd_fwd_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_inv_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/av1_highbd_quantize_sse4.c
+endif
+
+ifeq ($(CONFIG_EXT_INTER),yes)
+AV1_CX_SRCS-yes += encoder/wedge_utils.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
+endif
+
+AV1_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
+
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
+endif
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
+
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+
+AV1_CX_SRCS-yes := $(filter-out $(AV1_CX_SRCS_REMOVE-yes),$(AV1_CX_SRCS-yes))
diff --git a/av1/av1dx.mk b/av1/av1dx.mk
new file mode 100644
index 0000000..0b74abe
--- /dev/null
+++ b/av1/av1dx.mk
@@ -0,0 +1,34 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+AV1_DX_EXPORTS += exports_dec
+
+AV1_DX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_DX_SRCS-no += $(AV1_COMMON_SRCS-no)
+AV1_DX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_DX_SRCS_REMOVE-no += $(AV1_COMMON_SRCS_REMOVE-no)
+
+AV1_DX_SRCS-yes += av1_dx_iface.c
+
+AV1_DX_SRCS-yes += decoder/decodemv.c
+AV1_DX_SRCS-yes += decoder/decodeframe.c
+AV1_DX_SRCS-yes += decoder/decodeframe.h
+AV1_DX_SRCS-yes += decoder/detokenize.c
+AV1_DX_SRCS-yes += decoder/decodemv.h
+AV1_DX_SRCS-yes += decoder/detokenize.h
+AV1_DX_SRCS-yes += decoder/dthread.c
+AV1_DX_SRCS-yes += decoder/dthread.h
+AV1_DX_SRCS-yes += decoder/decoder.c
+AV1_DX_SRCS-yes += decoder/decoder.h
+AV1_DX_SRCS-yes += decoder/dsubexp.c
+AV1_DX_SRCS-yes += decoder/dsubexp.h
+AV1_DX_SRCS-yes += decoder/bitreader.h
+
+AV1_DX_SRCS-yes := $(filter-out $(AV1_DX_SRCS_REMOVE-yes),$(AV1_DX_SRCS-yes))
diff --git a/av1/common/alloccommon.c b/av1/common/alloccommon.c
index b6ff12a..eb4f8e6 100644
--- a/av1/common/alloccommon.c
+++ b/av1/common/alloccommon.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/alloccommon.h"
#include "av1/common/blockd.h"
@@ -17,7 +17,7 @@
#include "av1/common/entropymv.h"
#include "av1/common/onyxc_int.h"
-void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) {
+void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
@@ -30,11 +30,11 @@
cm->MBs = cm->mb_rows * cm->mb_cols;
}
-static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
+static int alloc_seg_map(AV1_COMMON *cm, int seg_map_size) {
int i;
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
- cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
+ cm->seg_map_array[i] = (uint8_t *)aom_calloc(seg_map_size, 1);
if (cm->seg_map_array[i] == NULL) return 1;
}
cm->seg_map_alloc_size = seg_map_size;
@@ -50,11 +50,11 @@
return 0;
}
-static void free_seg_map(VP10_COMMON *cm) {
+static void free_seg_map(AV1_COMMON *cm) {
int i;
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
- vpx_free(cm->seg_map_array[i]);
+ aom_free(cm->seg_map_array[i]);
cm->seg_map_array[i] = NULL;
}
@@ -65,7 +65,7 @@
}
}
-void vp10_free_ref_frame_buffers(BufferPool *pool) {
+void av1_free_ref_frame_buffers(BufferPool *pool) {
int i;
for (i = 0; i < FRAME_BUFFERS; ++i) {
@@ -74,45 +74,45 @@
pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
pool->frame_bufs[i].ref_count = 0;
}
- vpx_free(pool->frame_bufs[i].mvs);
+ aom_free(pool->frame_bufs[i].mvs);
pool->frame_bufs[i].mvs = NULL;
- vpx_free_frame_buffer(&pool->frame_bufs[i].buf);
+ aom_free_frame_buffer(&pool->frame_bufs[i].buf);
}
}
#if CONFIG_LOOP_RESTORATION
-void vp10_free_restoration_buffers(VP10_COMMON *cm) {
- vpx_free(cm->rst_info.bilateral_level);
+void av1_free_restoration_buffers(AV1_COMMON *cm) {
+ aom_free(cm->rst_info.bilateral_level);
cm->rst_info.bilateral_level = NULL;
- vpx_free(cm->rst_info.vfilter);
+ aom_free(cm->rst_info.vfilter);
cm->rst_info.vfilter = NULL;
- vpx_free(cm->rst_info.hfilter);
+ aom_free(cm->rst_info.hfilter);
cm->rst_info.hfilter = NULL;
- vpx_free(cm->rst_info.wiener_level);
+ aom_free(cm->rst_info.wiener_level);
cm->rst_info.wiener_level = NULL;
}
#endif // CONFIG_LOOP_RESTORATION
-void vp10_free_context_buffers(VP10_COMMON *cm) {
+void av1_free_context_buffers(AV1_COMMON *cm) {
int i;
cm->free_mi(cm);
free_seg_map(cm);
for (i = 0; i < MAX_MB_PLANE; i++) {
- vpx_free(cm->above_context[i]);
+ aom_free(cm->above_context[i]);
cm->above_context[i] = NULL;
}
- vpx_free(cm->above_seg_context);
+ aom_free(cm->above_seg_context);
cm->above_seg_context = NULL;
#if CONFIG_VAR_TX
- vpx_free(cm->above_txfm_context);
+ aom_free(cm->above_txfm_context);
cm->above_txfm_context = NULL;
#endif
}
-int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
+int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
int new_mi_size;
- vp10_set_mb_mi(cm, width, height);
+ av1_set_mb_mi(cm, width, height);
new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
if (cm->mi_alloc_size < new_mi_size) {
cm->free_mi(cm);
@@ -134,20 +134,20 @@
int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
- vpx_free(cm->above_context[i]);
- cm->above_context[i] = (ENTROPY_CONTEXT *)vpx_calloc(
+ aom_free(cm->above_context[i]);
+ cm->above_context[i] = (ENTROPY_CONTEXT *)aom_calloc(
2 * aligned_mi_cols, sizeof(*cm->above_context[0]));
if (!cm->above_context[i]) goto fail;
}
- vpx_free(cm->above_seg_context);
- cm->above_seg_context = (PARTITION_CONTEXT *)vpx_calloc(
+ aom_free(cm->above_seg_context);
+ cm->above_seg_context = (PARTITION_CONTEXT *)aom_calloc(
aligned_mi_cols, sizeof(*cm->above_seg_context));
if (!cm->above_seg_context) goto fail;
#if CONFIG_VAR_TX
- vpx_free(cm->above_txfm_context);
- cm->above_txfm_context = (TXFM_CONTEXT *)vpx_calloc(
+ aom_free(cm->above_txfm_context);
+ cm->above_txfm_context = (TXFM_CONTEXT *)aom_calloc(
aligned_mi_cols, sizeof(*cm->above_txfm_context));
if (!cm->above_txfm_context) goto fail;
#endif
@@ -159,27 +159,27 @@
fail:
// clear the mi_* values to force a realloc on resync
- vp10_set_mb_mi(cm, 0, 0);
- vp10_free_context_buffers(cm);
+ av1_set_mb_mi(cm, 0, 0);
+ av1_free_context_buffers(cm);
return 1;
}
-void vp10_remove_common(VP10_COMMON *cm) {
- vp10_free_context_buffers(cm);
+void av1_remove_common(AV1_COMMON *cm) {
+ av1_free_context_buffers(cm);
- vpx_free(cm->fc);
+ aom_free(cm->fc);
cm->fc = NULL;
- vpx_free(cm->frame_contexts);
+ aom_free(cm->frame_contexts);
cm->frame_contexts = NULL;
}
-void vp10_init_context_buffers(VP10_COMMON *cm) {
+void av1_init_context_buffers(AV1_COMMON *cm) {
cm->setup_mi(cm);
if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
}
-void vp10_swap_current_and_last_seg_map(VP10_COMMON *cm) {
+void av1_swap_current_and_last_seg_map(AV1_COMMON *cm) {
// Swap indices.
const int tmp = cm->seg_map_idx;
cm->seg_map_idx = cm->prev_seg_map_idx;
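A note on the pattern these alloccommon.c hunks preserve: every aom_calloc() result is checked, and a failure jumps to a fail label that resets the mi geometry and frees whatever was already allocated, so a resync can retry from a clean state. A minimal sketch of that goto-fail idiom, with plain calloc/free standing in for aom_calloc/aom_free and illustrative names:

#include <stdlib.h>

typedef struct {
  int *above_context;
  int *above_seg_context;
} ctx_sketch;

/* Returns 1 on failure, mirroring av1_alloc_context_buffers() above. */
static int alloc_ctx_sketch(ctx_sketch *c, size_t cols) {
  c->above_context = NULL; /* so the fail path can free() safely */
  c->above_seg_context = NULL;
  c->above_context = (int *)calloc(2 * cols, sizeof(*c->above_context));
  if (!c->above_context) goto fail;
  c->above_seg_context = (int *)calloc(cols, sizeof(*c->above_seg_context));
  if (!c->above_seg_context) goto fail;
  return 0;

fail:
  /* Free partial allocations and reset pointers so a retry is safe. */
  free(c->above_context);
  c->above_context = NULL;
  free(c->above_seg_context);
  c->above_seg_context = NULL;
  return 1;
}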
diff --git a/av1/common/alloccommon.h b/av1/common/alloccommon.h
index d2d2643..ad0b454 100644
--- a/av1/common/alloccommon.h
+++ b/av1/common/alloccommon.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ALLOCCOMMON_H_
-#define VP10_COMMON_ALLOCCOMMON_H_
+#ifndef AV1_COMMON_ALLOCCOMMON_H_
+#define AV1_COMMON_ALLOCCOMMON_H_
#define INVALID_IDX -1 // Invalid buffer index.
@@ -17,29 +17,29 @@
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
struct BufferPool;
-void vp10_remove_common(struct VP10Common *cm);
+void av1_remove_common(struct AV1Common *cm);
-int vp10_alloc_context_buffers(struct VP10Common *cm, int width, int height);
-void vp10_init_context_buffers(struct VP10Common *cm);
-void vp10_free_context_buffers(struct VP10Common *cm);
+int av1_alloc_context_buffers(struct AV1Common *cm, int width, int height);
+void av1_init_context_buffers(struct AV1Common *cm);
+void av1_free_context_buffers(struct AV1Common *cm);
-void vp10_free_ref_frame_buffers(struct BufferPool *pool);
+void av1_free_ref_frame_buffers(struct BufferPool *pool);
#if CONFIG_LOOP_RESTORATION
-void vp10_free_restoration_buffers(struct VP10Common *cm);
+void av1_free_restoration_buffers(struct AV1Common *cm);
#endif // CONFIG_LOOP_RESTORATION
-int vp10_alloc_state_buffers(struct VP10Common *cm, int width, int height);
-void vp10_free_state_buffers(struct VP10Common *cm);
+int av1_alloc_state_buffers(struct AV1Common *cm, int width, int height);
+void av1_free_state_buffers(struct AV1Common *cm);
-void vp10_set_mb_mi(struct VP10Common *cm, int width, int height);
+void av1_set_mb_mi(struct AV1Common *cm, int width, int height);
-void vp10_swap_current_and_last_seg_map(struct VP10Common *cm);
+void av1_swap_current_and_last_seg_map(struct AV1Common *cm);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_ALLOCCOMMON_H_
+#endif // AV1_COMMON_ALLOCCOMMON_H_
diff --git a/av1/common/ans.h b/av1/common/ans.h
index c974ada..1a632ee 100644
--- a/av1/common/ans.h
+++ b/av1/common/ans.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ANS_H_
-#define VP10_COMMON_ANS_H_
+#ifndef AV1_COMMON_ANS_H_
+#define AV1_COMMON_ANS_H_
// An implementation of Asymmetric Numeral Systems
// http://arxiv.org/abs/1311.2540v2
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/prob.h"
#include "aom_ports/mem_ops.h"
@@ -250,9 +250,9 @@
// TODO(aconverse): Replace trees with tokensets.
static INLINE int uabs_read_tree(struct AnsDecoder *ans,
- const vpx_tree_index *tree,
+ const aom_tree_index *tree,
const AnsP8 *probs) {
- vpx_tree_index i = 0;
+ aom_tree_index i = 0;
while ((i = tree[i + uabs_read(ans, probs[i >> 1])]) > 0) continue;
@@ -313,8 +313,8 @@
adjustment -= out_pdf[0];
for (i = 0; i < in_syms; ++i) {
int p = (p1 * src_pdf[i] + round_fact) >> ans_p8_shift;
- p = VPXMIN(p, (int)rans_precision - in_syms);
- p = VPXMAX(p, 1);
+ p = AOMMIN(p, (int)rans_precision - in_syms);
+ p = AOMMAX(p, 1);
out_pdf[i + 1] = p;
adjustment -= p;
}
@@ -411,4 +411,4 @@
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
-#endif // VP10_COMMON_ANS_H_
+#endif // AV1_COMMON_ANS_H_
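For context on uabs_read_tree() in the hunk above: an aom_tree_index array encodes a binary token tree flat in memory, where a positive entry indexes the next node pair and a non-positive entry is a leaf holding the negated token, the convention carried over from libvpx. A hedged, self-contained sketch, with read_bit() standing in for uabs_read(ans, probs[i >> 1]):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef int8_t tree_index; /* stand-in for aom_tree_index */

static int read_tree_sketch(const tree_index *tree,
                            int (*read_bit)(void *ctx), void *ctx) {
  tree_index i = 0;
  /* Descend one decoded bit at a time until a leaf (entry <= 0). */
  while ((i = tree[i + read_bit(ctx)]) > 0) continue;
  return -i; /* leaves store the token negated */
}

/* Two-symbol example: one bit selects token 0 or token 1 (stored negated). */
static const tree_index two_symbol_tree[2] = { 0, -1 };

static int always_zero(void *ctx) { (void)ctx; return 0; }

int main(void) {
  assert(read_tree_sketch(two_symbol_tree, always_zero, NULL) == 0);
  return 0;
}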
diff --git a/av1/common/arm/neon/iht4x4_add_neon.c b/av1/common/arm/neon/iht4x4_add_neon.c
index 600e66b..fc72c98 100644
--- a/av1/common/arm/neon/iht4x4_add_neon.c
+++ b/av1/common/arm/neon/iht4x4_add_neon.c
@@ -11,8 +11,8 @@
#include <arm_neon.h>
#include <assert.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
#include "av1/common/common.h"
static int16_t sinpi_1_9 = 0x14a3;
@@ -139,8 +139,8 @@
return;
}
-void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
- int dest_stride, int tx_type) {
+void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
+ int dest_stride, int tx_type) {
uint8x8_t d26u8, d27u8;
int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
uint32x2_t d26u32, d27u32;
@@ -156,7 +156,7 @@
switch (tx_type) {
case 0: // idct_idct is not supported. Fall back to C
- vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
+ av1_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
return;
break;
case 1: // iadst_idct
diff --git a/av1/common/arm/neon/iht8x8_add_neon.c b/av1/common/arm/neon/iht8x8_add_neon.c
index ff5578d..8421926 100644
--- a/av1/common/arm/neon/iht8x8_add_neon.c
+++ b/av1/common/arm/neon/iht8x8_add_neon.c
@@ -11,8 +11,8 @@
#include <arm_neon.h>
#include <assert.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
#include "av1/common/common.h"
static int16_t cospi_2_64 = 16305;
@@ -471,8 +471,8 @@
return;
}
-void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
- int dest_stride, int tx_type) {
+void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
+ int dest_stride, int tx_type) {
int i;
uint8_t *d1, *d2;
uint8x8_t d0u8, d1u8, d2u8, d3u8;
@@ -494,7 +494,7 @@
switch (tx_type) {
case 0: // idct_idct is not supported. Fall back to C
- vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
+ av1_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
return;
break;
case 1: // iadst_idct
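Both NEON hunks keep the same dispatch shape: tx_type selects the 1-D row and column kernels, and the pure-DCT combination (tx_type 0) defers to the C implementation because no idct_idct NEON path exists. A hedged sketch of that shape, with stand-in names:

#include <stdio.h>

enum { DCT_DCT = 0, ADST_DCT = 1, DCT_ADST = 2, ADST_ADST = 3 };

static void iht_add_sketch(int tx_type) {
  switch (tx_type) {
    case DCT_DCT:
      puts("fall back to the C kernel"); /* as av1_iht*_add_c above */
      return;
    case ADST_DCT:
    case DCT_ADST:
    case ADST_ADST:
      puts("run the NEON row and column passes");
      return;
  }
}

int main(void) { iht_add_sketch(DCT_DCT); return 0; }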
diff --git a/av1/common/vp10_convolve.c b/av1/common/av1_convolve.c
similarity index 66%
rename from av1/common/vp10_convolve.c
rename to av1/common/av1_convolve.c
index b62bae5..dec6759 100644
--- a/av1/common/vp10_convolve.c
+++ b/av1/common/av1_convolve.c
@@ -1,10 +1,10 @@
#include <assert.h>
#include <string.h>
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_convolve.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_convolve.h"
#include "av1/common/filter.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#define MAX_BLOCK_WIDTH (MAX_SB_SIZE)
@@ -12,10 +12,10 @@
#define MAX_STEP (32)
#define MAX_FILTER_TAP (12)
-void vp10_convolve_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams filter_params,
- const int subpel_x_q4, int x_step_q4, int avg) {
+void av1_convolve_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams filter_params,
+ const int subpel_x_q4, int x_step_q4, int avg) {
int x, y;
int filter_size = filter_params.taps;
src -= filter_size / 2 - 1;
@@ -23,7 +23,7 @@
int x_q4 = subpel_x_q4;
for (x = 0; x < w; ++x) {
const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
- const int16_t *x_filter = vp10_get_interp_filter_subpel_kernel(
+ const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
filter_params, x_q4 & SUBPEL_MASK);
int k, sum = 0;
for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
@@ -40,10 +40,10 @@
}
}
-void vp10_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams filter_params,
- const int subpel_y_q4, int y_step_q4, int avg) {
+void av1_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams filter_params,
+ const int subpel_y_q4, int y_step_q4, int avg) {
int x, y;
int filter_size = filter_params.taps;
src -= src_stride * (filter_size / 2 - 1);
@@ -52,7 +52,7 @@
int y_q4 = subpel_y_q4;
for (y = 0; y < h; ++y) {
const uint8_t *const src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
- const int16_t *y_filter = vp10_get_interp_filter_subpel_kernel(
+ const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
filter_params, y_q4 & SUBPEL_MASK);
int k, sum = 0;
for (k = 0; k < filter_size; ++k)
@@ -93,15 +93,15 @@
}
}
-void vp10_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
+void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
+ const INTERP_FILTER *interp_filter,
#else
- const INTERP_FILTER interp_filter,
+ const INTERP_FILTER interp_filter,
#endif
- const int subpel_x_q4, int x_step_q4, const int subpel_y_q4,
- int y_step_q4, int ref_idx) {
+ const int subpel_x_q4, int x_step_q4, const int subpel_y_q4,
+ int y_step_q4, int ref_idx) {
int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
int ignore_vert = y_step_q4 == 16 && subpel_y_q4 == 0;
@@ -115,25 +115,25 @@
} else if (ignore_vert) {
#if CONFIG_DUAL_FILTER
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
#else
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
#endif
assert(filter_params.taps <= MAX_FILTER_TAP);
- vp10_convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
- subpel_x_q4, x_step_q4, ref_idx);
+ av1_convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
+ subpel_x_q4, x_step_q4, ref_idx);
} else if (ignore_horiz) {
#if CONFIG_DUAL_FILTER
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter[2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[2 * ref_idx]);
#else
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
#endif
assert(filter_params.taps <= MAX_FILTER_TAP);
- vp10_convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
- subpel_y_q4, y_step_q4, ref_idx);
+ av1_convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
+ subpel_y_q4, y_step_q4, ref_idx);
} else {
// temp's size is set to (maximum possible intermediate_height) *
// MAX_BLOCK_WIDTH
@@ -143,9 +143,9 @@
int temp_stride = MAX_BLOCK_WIDTH;
#if CONFIG_DUAL_FILTER
InterpFilterParams filter_params_x =
- vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
InterpFilterParams filter_params_y =
- vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
InterpFilterParams filter_params = filter_params_x;
// The filter size implies the required number of reference pixels for
@@ -154,7 +154,7 @@
int filter_size = filter_params_y.taps;
#else
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
int filter_size = filter_params.taps;
#endif
int intermediate_height =
@@ -162,30 +162,30 @@
assert(filter_params.taps <= MAX_FILTER_TAP);
- vp10_convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride,
- temp, temp_stride, w, intermediate_height,
- filter_params, subpel_x_q4, x_step_q4, 0);
+ av1_convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride,
+ temp, temp_stride, w, intermediate_height, filter_params,
+ subpel_x_q4, x_step_q4, 0);
#if CONFIG_DUAL_FILTER
filter_params = filter_params_y;
#else
- filter_params = vp10_get_interp_filter_params(interp_filter);
+ filter_params = av1_get_interp_filter_params(interp_filter);
#endif
filter_size = filter_params.taps;
assert(filter_params.taps <= MAX_FILTER_TAP);
- vp10_convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
- dst, dst_stride, w, h, filter_params, subpel_y_q4,
- y_step_q4, ref_idx);
+ av1_convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
+ dst, dst_stride, w, h, filter_params, subpel_y_q4,
+ y_step_q4, ref_idx);
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_convolve_horiz_c(const uint16_t *src, int src_stride,
- uint16_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams filter_params,
- const int subpel_x_q4, int x_step_q4, int avg,
- int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_convolve_horiz_c(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams filter_params,
+ const int subpel_x_q4, int x_step_q4, int avg,
+ int bd) {
int x, y;
int filter_size = filter_params.taps;
src -= filter_size / 2 - 1;
@@ -193,7 +193,7 @@
int x_q4 = subpel_x_q4;
for (x = 0; x < w; ++x) {
const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
- const int16_t *x_filter = vp10_get_interp_filter_subpel_kernel(
+ const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
filter_params, x_q4 & SUBPEL_MASK);
int k, sum = 0;
for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
@@ -211,11 +211,11 @@
}
}
-void vp10_highbd_convolve_vert_c(const uint16_t *src, int src_stride,
- uint16_t *dst, int dst_stride, int w, int h,
- const InterpFilterParams filter_params,
- const int subpel_y_q4, int y_step_q4, int avg,
- int bd) {
+void av1_highbd_convolve_vert_c(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams filter_params,
+ const int subpel_y_q4, int y_step_q4, int avg,
+ int bd) {
int x, y;
int filter_size = filter_params.taps;
src -= src_stride * (filter_size / 2 - 1);
@@ -224,7 +224,7 @@
int y_q4 = subpel_y_q4;
for (y = 0; y < h; ++y) {
const uint16_t *const src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
- const int16_t *y_filter = vp10_get_interp_filter_subpel_kernel(
+ const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
filter_params, y_q4 & SUBPEL_MASK);
int k, sum = 0;
for (k = 0; k < filter_size; ++k)
@@ -267,16 +267,16 @@
}
}
-void vp10_highbd_convolve(const uint8_t *src8, int src_stride, uint8_t *dst8,
- int dst_stride, int w, int h,
+void av1_highbd_convolve(const uint8_t *src8, int src_stride, uint8_t *dst8,
+ int dst_stride, int w, int h,
#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
+ const INTERP_FILTER *interp_filter,
#else
- const INTERP_FILTER interp_filter,
+ const INTERP_FILTER interp_filter,
#endif
- const int subpel_x_q4, int x_step_q4,
- const int subpel_y_q4, int y_step_q4, int ref_idx,
- int bd) {
+ const int subpel_x_q4, int x_step_q4,
+ const int subpel_y_q4, int y_step_q4, int ref_idx,
+ int bd) {
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
@@ -292,25 +292,25 @@
} else if (ignore_vert) {
#if CONFIG_DUAL_FILTER
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
#else
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
#endif
- vp10_highbd_convolve_horiz(src, src_stride, dst, dst_stride, w, h,
- filter_params, subpel_x_q4, x_step_q4, ref_idx,
- bd);
+ av1_highbd_convolve_horiz(src, src_stride, dst, dst_stride, w, h,
+ filter_params, subpel_x_q4, x_step_q4, ref_idx,
+ bd);
} else if (ignore_horiz) {
#if CONFIG_DUAL_FILTER
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
#else
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
#endif
- vp10_highbd_convolve_vert(src, src_stride, dst, dst_stride, w, h,
- filter_params, subpel_y_q4, y_step_q4, ref_idx,
- bd);
+ av1_highbd_convolve_vert(src, src_stride, dst, dst_stride, w, h,
+ filter_params, subpel_y_q4, y_step_q4, ref_idx,
+ bd);
} else {
// temp's size is set to (maximum possible intermediate_height) *
// MAX_BLOCK_WIDTH
@@ -321,21 +321,21 @@
#if CONFIG_DUAL_FILTER
InterpFilterParams filter_params_x =
- vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
InterpFilterParams filter_params_y =
- vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
InterpFilterParams filter_params = filter_params_x;
int filter_size = filter_params_y.taps;
#else
InterpFilterParams filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
int filter_size = filter_params.taps;
#endif
int intermediate_height =
(((h - 1) * y_step_q4 + subpel_y_q4) >> SUBPEL_BITS) + filter_size;
- vp10_highbd_convolve_horiz(
+ av1_highbd_convolve_horiz(
src - src_stride * (filter_size / 2 - 1), src_stride, temp, temp_stride,
w, intermediate_height, filter_params, subpel_x_q4, x_step_q4, 0, bd);
@@ -345,9 +345,9 @@
filter_size = filter_params.taps;
assert(filter_params.taps <= MAX_FILTER_TAP);
- vp10_highbd_convolve_vert(temp + temp_stride * (filter_size / 2 - 1),
- temp_stride, dst, dst_stride, w, h, filter_params,
- subpel_y_q4, y_step_q4, ref_idx, bd);
+ av1_highbd_convolve_vert(temp + temp_stride * (filter_size / 2 - 1),
+ temp_stride, dst, dst_stride, w, h, filter_params,
+ subpel_y_q4, y_step_q4, ref_idx, bd);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
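A worked example for the intermediate_height expression in av1_convolve() above. SUBPEL_BITS is 4, so the *_q4 arguments count 1/16-pel units, and the horizontal pass has to produce enough extra rows to cover the vertical filter's support. Assuming h = 16, an 8-tap filter, unit step (y_step_q4 = 16) and a 5/16-pel phase:

#include <assert.h>

#define SUBPEL_BITS 4 /* q4 quantities are in 1/16-pel units */

int main(void) {
  const int h = 16, y_step_q4 = 16, subpel_y_q4 = 5, filter_size = 8;
  const int intermediate_height =
      (((h - 1) * y_step_q4 + subpel_y_q4) >> SUBPEL_BITS) + filter_size;
  assert(intermediate_height == 23); /* ((15 * 16 + 5) >> 4) + 8 */
  return 0;
}

This is also why temp is sized as (maximum possible intermediate_height) * MAX_BLOCK_WIDTH in the hunks above.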
diff --git a/av1/common/av1_convolve.h b/av1/common/av1_convolve.h
new file mode 100644
index 0000000..f082a8a
--- /dev/null
+++ b/av1/common/av1_convolve.h
@@ -0,0 +1,35 @@
+#ifndef AV1_COMMON_AV1_CONVOLVE_H_
+#define AV1_COMMON_AV1_CONVOLVE_H_
+#include "av1/common/filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+ const INTERP_FILTER *interp_filter,
+#else
+ const INTERP_FILTER interp_filter,
+#endif
+ const int subpel_x, int xstep, const int subpel_y, int ystep,
+ int avg);
+
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+ const INTERP_FILTER *interp_filter,
+#else
+ const INTERP_FILTER interp_filter,
+#endif
+ const int subpel_x, int xstep, const int subpel_y,
+ int ystep, int avg, int bd);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // AV1_COMMON_AV1_CONVOLVE_H_
diff --git a/av1/common/vp10_fwd_txfm.c b/av1/common/av1_fwd_txfm.c
similarity index 92%
rename from av1/common/vp10_fwd_txfm.c
rename to av1/common/av1_fwd_txfm.c
index eb1c018..221f4e1 100644
--- a/av1/common/vp10_fwd_txfm.c
+++ b/av1/common/av1_fwd_txfm.c
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_fwd_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_fwd_txfm.h"
-void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@
}
}
-void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 4; ++r)
@@ -87,8 +87,7 @@
output[1] = 0;
}
-void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
- int stride) {
+void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
int i, j;
tran_low_t intermediate[64];
int pass;
@@ -173,7 +172,7 @@
}
}
-void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 8; ++r)
@@ -183,7 +182,7 @@
output[1] = 0;
}
-void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
@@ -363,7 +362,7 @@
}
}
-void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 16; ++r)
@@ -386,7 +385,7 @@
return rv;
}
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
tran_high_t step[32];
// Stage 1
step[0] = input[0] + input[(32 - 1)];
@@ -709,7 +708,7 @@
output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
}
-void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
int i, j;
tran_high_t output[32 * 32];
@@ -717,7 +716,7 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
@@ -726,7 +725,7 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
out[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -736,7 +735,7 @@
// Note that although we use dct_32_round in dct32 computation flow,
// this 2d fdct32x32 for rate-distortion optimization loop is operating
// within 16 bits precision.
-void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
int i, j;
tran_high_t output[32 * 32];
@@ -744,11 +743,11 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
// TODO(cd): see quality impact of only doing
// output[j * 32 + i] = (temp_out[j] + 1) >> 2;
- // PS: also change code in vp10_dsp/x86/vp10_dct_sse2.c
+ // PS: also change code in av1_dsp/x86/av1_dct_sse2.c
output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
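On the rounding in the pass above: (x + 1 + (x > 0)) >> 2 divides by 4 and rounds ties away from zero, while the (x < 0) variant used in the row pass of av1_fdct32x32_c rounds ties toward zero. A quick check, assuming the usual arithmetic right shift of negative values:

#include <assert.h>

int main(void) {
  int x;
  x = 6;  assert(((x + 1 + (x > 0)) >> 2) == 2);  /*  1.5  ->  2 */
  x = -6; assert(((x + 1 + (x > 0)) >> 2) == -2); /* -1.5  -> -2 */
  x = 5;  assert(((x + 1 + (x > 0)) >> 2) == 1);  /*  1.25 ->  1 */
  x = -6; assert(((x + 1 + (x < 0)) >> 2) == -1); /* -1.5  -> -1 */
  return 0;
}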
@@ -756,12 +755,12 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
- vp10_fdct32(temp_in, temp_out, 1);
+ av1_fdct32(temp_in, temp_out, 1);
for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
}
}
-void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 32; ++r)
@@ -771,44 +770,43 @@
output[1] = 0;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
- int stride) {
- vp10_fdct4x4_c(input, output, stride);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+ int stride) {
+ av1_fdct4x4_c(input, output, stride);
}
-void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
- int stride) {
- vp10_fdct8x8_c(input, final_output, stride);
+void av1_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+ int stride) {
+ av1_fdct8x8_c(input, final_output, stride);
}
-void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
- int stride) {
- vp10_fdct8x8_1_c(input, final_output, stride);
+void av1_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+ int stride) {
+ av1_fdct8x8_1_c(input, final_output, stride);
}
-void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
- int stride) {
- vp10_fdct16x16_c(input, output, stride);
+void av1_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+ int stride) {
+ av1_fdct16x16_c(input, output, stride);
}
-void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+ int stride) {
+ av1_fdct16x16_1_c(input, output, stride);
+}
+
+void av1_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+ av1_fdct32x32_c(input, out, stride);
+}
+
+void av1_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
int stride) {
- vp10_fdct16x16_1_c(input, output, stride);
+ av1_fdct32x32_rd_c(input, out, stride);
}
-void vp10_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
- int stride) {
- vp10_fdct32x32_c(input, out, stride);
+void av1_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+ int stride) {
+ av1_fdct32x32_1_c(input, out, stride);
}
-
-void vp10_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
- int stride) {
- vp10_fdct32x32_rd_c(input, out, stride);
-}
-
-void vp10_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
- int stride) {
- vp10_fdct32x32_1_c(input, out, stride);
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
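The comments in av1_fwd_txfm.c above all describe the same two-pass scheme: run a 1-D transform down the columns, write the results transposed, then run the 1-D transform across the rows. A minimal sketch with a toy 4-point butterfly standing in for the real kernels (the real passes also shift and round between stages, per the fwd_shift tables in av1_fwd_txfm2d_cfg.h):

#define N 4

/* Toy 1-D kernel (sum/difference butterflies), not a real DCT. */
static void txfm1d_sketch(const int in[N], int out[N]) {
  out[0] = in[0] + in[3];
  out[1] = in[1] + in[2];
  out[2] = in[1] - in[2];
  out[3] = in[0] - in[3];
}

static void txfm2d_sketch(const int input[N][N], int output[N][N]) {
  int tmp[N][N];
  /* Pass 1: transform each column, storing the result transposed. */
  for (int c = 0; c < N; ++c) {
    int col[N], res[N];
    for (int r = 0; r < N; ++r) col[r] = input[r][c];
    txfm1d_sketch(col, res);
    for (int r = 0; r < N; ++r) tmp[c][r] = res[r];
  }
  /* Pass 2: transform each row of the transposed intermediate. */
  for (int r = 0; r < N; ++r) txfm1d_sketch(tmp[r], output[r]);
}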
diff --git a/av1/common/vp10_fwd_txfm.h b/av1/common/av1_fwd_txfm.h
similarity index 71%
rename from av1/common/vp10_fwd_txfm.h
rename to av1/common/av1_fwd_txfm.h
index a0481d3..96d942e 100644
--- a/av1/common/vp10_fwd_txfm.h
+++ b/av1/common/av1_fwd_txfm.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_VP10_FWD_TXFM_H_
-#define VP10_COMMON_VP10_FWD_TXFM_H_
+#ifndef AV1_COMMON_AV1_FWD_TXFM_H_
+#define AV1_COMMON_AV1_FWD_TXFM_H_
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/fwd_txfm.h"
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif // VP10_COMMON_VP10_FWD_TXFM_H_
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif // AV1_COMMON_AV1_FWD_TXFM_H_
diff --git a/av1/common/vp10_fwd_txfm1d.c b/av1/common/av1_fwd_txfm1d.c
similarity index 98%
rename from av1/common/vp10_fwd_txfm1d.c
rename to av1/common/av1_fwd_txfm1d.c
index 6dff077..3dc960c 100644
--- a/av1/common/vp10_fwd_txfm1d.c
+++ b/av1/common/av1_fwd_txfm1d.c
@@ -9,7 +9,7 @@
*/
#include <stdlib.h>
-#include "av1/common/vp10_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm1d.h"
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#define range_check(stage, input, buf, size, bit) \
{ \
@@ -40,8 +40,8 @@
}
#endif
-void vp10_fdct4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range) {
const int32_t size = 4;
const int32_t *cospi;
@@ -83,8 +83,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fdct8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range) {
const int32_t size = 8;
const int32_t *cospi;
@@ -168,8 +168,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fdct16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 16;
const int32_t *cospi;
@@ -339,8 +339,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fdct32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 32;
const int32_t *cospi;
@@ -700,8 +700,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fadst4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst4_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 4;
const int32_t *cospi;
@@ -765,8 +765,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fadst8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst8_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 8;
const int32_t *cospi;
@@ -880,8 +880,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fadst16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 16;
const int32_t *cospi;
@@ -1094,8 +1094,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_fadst32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 32;
const int32_t *cospi;
diff --git a/av1/common/av1_fwd_txfm1d.h b/av1/common/av1_fwd_txfm1d.h
new file mode 100644
index 0000000..7aab70e
--- /dev/null
+++ b/av1/common/av1_fwd_txfm1d.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AV1_FWD_TXFM1D_H_
+#define AV1_FWD_TXFM1D_H_
+
+#include "av1/common/av1_txfm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range);
+void av1_fdct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range);
+void av1_fdct16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct64_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_fadst4_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst8_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AV1_FWD_TXFM1D_H_
diff --git a/av1/common/vp10_fwd_txfm2d.c b/av1/common/av1_fwd_txfm2d.c
similarity index 76%
rename from av1/common/vp10_fwd_txfm2d.c
rename to av1/common/av1_fwd_txfm2d.c
index 85c6b68..dc984e1 100644
--- a/av1/common/vp10_fwd_txfm2d.c
+++ b/av1/common/av1_fwd_txfm2d.c
@@ -10,22 +10,22 @@
#include <assert.h>
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "av1/common/enums.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
-#include "av1/common/vp10_fwd_txfm2d_cfg.h"
-#include "av1/common/vp10_txfm.h"
+#include "av1/common/av1_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
static INLINE TxfmFunc fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
switch (txfm_type) {
- case TXFM_TYPE_DCT4: return vp10_fdct4_new;
- case TXFM_TYPE_DCT8: return vp10_fdct8_new;
- case TXFM_TYPE_DCT16: return vp10_fdct16_new;
- case TXFM_TYPE_DCT32: return vp10_fdct32_new;
- case TXFM_TYPE_ADST4: return vp10_fadst4_new;
- case TXFM_TYPE_ADST8: return vp10_fadst8_new;
- case TXFM_TYPE_ADST16: return vp10_fadst16_new;
- case TXFM_TYPE_ADST32: return vp10_fadst32_new;
+ case TXFM_TYPE_DCT4: return av1_fdct4_new;
+ case TXFM_TYPE_DCT8: return av1_fdct8_new;
+ case TXFM_TYPE_DCT16: return av1_fdct16_new;
+ case TXFM_TYPE_DCT32: return av1_fdct32_new;
+ case TXFM_TYPE_ADST4: return av1_fadst4_new;
+ case TXFM_TYPE_ADST8: return av1_fadst8_new;
+ case TXFM_TYPE_ADST16: return av1_fadst16_new;
+ case TXFM_TYPE_ADST32: return av1_fadst32_new;
default: assert(0); return NULL;
}
}
@@ -76,42 +76,42 @@
}
}
-void vp10_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+void av1_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
+ int tx_type, int bd) {
int32_t txfm_buf[4 * 4];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_4X4);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_4X4);
(void)bd;
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
}
-void vp10_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+void av1_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
+ int tx_type, int bd) {
int32_t txfm_buf[8 * 8];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_8X8);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X8);
(void)bd;
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
}
-void vp10_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+void av1_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
+ int tx_type, int bd) {
int32_t txfm_buf[16 * 16];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_16X16);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X16);
(void)bd;
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
}
-void vp10_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
+ int tx_type, int bd) {
int32_t txfm_buf[32 * 32];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
(void)bd;
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
}
-void vp10_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
+ int tx_type, int bd) {
int32_t txfm_buf[64 * 64];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
(void)bd;
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
}
@@ -150,14 +150,14 @@
};
#endif // CONFIG_EXT_TX
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
TXFM_2D_FLIP_CFG cfg;
set_flip_cfg(tx_type, &cfg);
cfg.cfg = fwd_txfm_cfg_ls[tx_type][tx_size];
return cfg;
}
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
TXFM_2D_FLIP_CFG cfg;
switch (tx_type) {
case DCT_DCT:
diff --git a/av1/common/vp10_fwd_txfm2d_cfg.h b/av1/common/av1_fwd_txfm2d_cfg.h
similarity index 99%
rename from av1/common/vp10_fwd_txfm2d_cfg.h
rename to av1/common/av1_fwd_txfm2d_cfg.h
index f780b87..49d324d 100644
--- a/av1/common/vp10_fwd_txfm2d_cfg.h
+++ b/av1/common/av1_fwd_txfm2d_cfg.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_FWD_TXFM2D_CFG_H_
-#define VP10_FWD_TXFM2D_CFG_H_
+#ifndef AV1_FWD_TXFM2D_CFG_H_
+#define AV1_FWD_TXFM2D_CFG_H_
#include "av1/common/enums.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm1d.h"
// ---------------- config fwd_dct_dct_4 ----------------
static const int8_t fwd_shift_dct_dct_4[3] = { 2, 0, 0 };
static const int8_t fwd_stage_range_col_dct_dct_4[4] = { 15, 16, 17, 17 };
@@ -440,4 +440,4 @@
TXFM_TYPE_ADST32, // .txfm_type_col
TXFM_TYPE_DCT32
}; // .txfm_type_row
-#endif // VP10_FWD_TXFM2D_CFG_H_
+#endif // AV1_FWD_TXFM2D_CFG_H_
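Reading the tables in this header: fwd_shift_dct_dct_4 = { 2, 0, 0 } lists the shifts applied before, between and after the two 1-D passes, and fwd_stage_range_col_dct_dct_4 = { 15, 16, 17, 17 } bounds the coefficient magnitude after each column stage, which is what range_check() in av1_fwd_txfm1d.c asserts when CONFIG_COEFFICIENT_RANGE_CHECKING is on. One plausible reading of that bound, as a sketch:

#include <assert.h>
#include <stdint.h>

/* Assert every coefficient's magnitude fits in `bit` bits. */
static void range_check_sketch(const int32_t *buf, int size, int8_t bit) {
  const int64_t bound = ((int64_t)1 << bit) - 1;
  for (int i = 0; i < size; ++i)
    assert(buf[i] <= bound && buf[i] >= -bound);
}

int main(void) {
  const int32_t after_stage0[4] = { 32767, -32767, 0, 1 };
  range_check_sketch(after_stage0, 4, 15); /* stage bound of 15 bits */
  return 0;
}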
diff --git a/av1/common/vp10_inv_txfm.c b/av1/common/av1_inv_txfm.c
similarity index 95%
rename from av1/common/vp10_inv_txfm.c
rename to av1/common/av1_inv_txfm.c
index a74de09..76a49a2 100644
--- a/av1/common/vp10_inv_txfm.c
+++ b/av1/common/av1_inv_txfm.c
@@ -12,10 +12,10 @@
#include <math.h>
#include <string.h>
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_inv_txfm.h"
-void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
0.5 shifts per pixel. */
int i;
@@ -67,8 +67,7 @@
}
}
-void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
- int dest_stride) {
+void av1_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
int i;
tran_high_t a1, e1;
tran_low_t tmp[4];
@@ -94,7 +93,7 @@
}
}
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step[4];
tran_high_t temp1, temp2;
// stage 1
@@ -114,7 +113,7 @@
output[3] = WRAPLOW(step[0] - step[3]);
}
-void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
int i, j;
@@ -122,7 +121,7 @@
// Rows
for (i = 0; i < 4; ++i) {
- vp10_idct4_c(input, outptr);
+ av1_idct4_c(input, outptr);
input += 4;
outptr += 4;
}
@@ -130,7 +129,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vp10_idct4_c(temp_in, temp_out);
+ av1_idct4_c(temp_in, temp_out);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 4));
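The av1_idct*_add_c routines above share this output step: after the row and column passes, each residual is rounded by ROUND_POWER_OF_TWO (4 bits for 4x4, 5 for 8x8, 6 for 16x16 and 32x32) and added into the prediction with clipping. A self-contained sketch, with round_shift() and clip255() standing in for ROUND_POWER_OF_TWO and clip_pixel_add:

#include <stdint.h>

static int round_shift(int v, int bits) {
  return (v + (1 << (bits - 1))) >> bits; /* like ROUND_POWER_OF_TWO */
}

static uint8_t clip255(int v) {
  return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

static void add_residual_sketch(const int *residual, uint8_t *dest,
                                int stride, int n, int shift) {
  for (int r = 0; r < n; ++r)
    for (int c = 0; c < n; ++c)
      dest[r * stride + c] = clip255(
          dest[r * stride + c] + round_shift(residual[r * n + c], shift));
}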
@@ -138,8 +137,8 @@
}
}
-void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
- int dest_stride) {
+void av1_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+ int dest_stride) {
int i;
tran_high_t a1;
tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -155,7 +154,7 @@
}
}
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
// stage 1
@@ -209,7 +208,7 @@
output[7] = WRAPLOW(step1[0] - step1[7]);
}
-void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
int i, j;
@@ -217,7 +216,7 @@
// First transform rows
for (i = 0; i < 8; ++i) {
- vp10_idct8_c(input, outptr);
+ av1_idct8_c(input, outptr);
input += 8;
outptr += 8;
}
@@ -225,7 +224,7 @@
// Then transform columns
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_idct8_c(temp_in, temp_out);
+ av1_idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -233,7 +232,7 @@
}
}
-void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -245,7 +244,7 @@
}
}
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[0];
@@ -282,7 +281,7 @@
output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3));
}
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output) {
int s0, s1, s2, s3, s4, s5, s6, s7;
tran_high_t x0 = input[7];
@@ -359,7 +358,7 @@
output[7] = WRAPLOW(-x1);
}
-void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -368,7 +367,7 @@
// First transform rows
// only first 4 row has non-zero coefs
for (i = 0; i < 4; ++i) {
- vp10_idct8_c(input, outptr);
+ av1_idct8_c(input, outptr);
input += 8;
outptr += 8;
}
@@ -376,7 +375,7 @@
// Then transform columns
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_idct8_c(temp_in, temp_out);
+ av1_idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -384,7 +383,7 @@
}
}
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
@@ -549,8 +548,8 @@
output[15] = WRAPLOW(step2[0] - step2[15]);
}
-void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
- int stride) {
+void av1_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+ int stride) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
int i, j;
@@ -558,7 +557,7 @@
// First transform rows
for (i = 0; i < 16; ++i) {
- vp10_idct16_c(input, outptr);
+ av1_idct16_c(input, outptr);
input += 16;
outptr += 16;
}
@@ -566,7 +565,7 @@
// Then transform columns
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_idct16_c(temp_in, temp_out);
+ av1_idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -574,7 +573,7 @@
}
}
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -745,8 +744,8 @@
output[15] = WRAPLOW(-x1);
}
-void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
- int stride) {
+void av1_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+ int stride) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -755,7 +754,7 @@
// First transform rows. Since all non-zero dct coefficients are in
// upper-left 4x4 area, we only need to calculate first 4 rows here.
for (i = 0; i < 4; ++i) {
- vp10_idct16_c(input, outptr);
+ av1_idct16_c(input, outptr);
input += 16;
outptr += 16;
}
@@ -763,7 +762,7 @@
// Then transform columns
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_idct16_c(temp_in, temp_out);
+ av1_idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -771,8 +770,7 @@
}
}
-void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
- int stride) {
+void av1_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -784,7 +782,7 @@
}
}
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[32], step2[32];
tran_high_t temp1, temp2;
@@ -1151,8 +1149,8 @@
output[31] = WRAPLOW(step1[0] - step1[31]);
}
-void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
- int stride) {
+void av1_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+ int stride) {
tran_low_t out[32 * 32];
tran_low_t *outptr = out;
int i, j;
@@ -1170,7 +1168,7 @@
zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
if (zero_coeff[0] | zero_coeff[1])
- vp10_idct32_c(input, outptr);
+ av1_idct32_c(input, outptr);
else
memset(outptr, 0, sizeof(tran_low_t) * 32);
input += 32;
@@ -1180,7 +1178,7 @@
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
- vp10_idct32_c(temp_in, temp_out);
+ av1_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1188,8 +1186,8 @@
}
}
-void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
- int stride) {
+void av1_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+ int stride) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -1198,7 +1196,7 @@
// Rows
// Only the upper-left 8x8 has non-zero coeffs.
for (i = 0; i < 8; ++i) {
- vp10_idct32_c(input, outptr);
+ av1_idct32_c(input, outptr);
input += 32;
outptr += 32;
}
@@ -1206,7 +1204,7 @@
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
- vp10_idct32_c(temp_in, temp_out);
+ av1_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1214,8 +1212,7 @@
}
}
-void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
- int stride) {
+void av1_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
@@ -1229,9 +1226,9 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
0.5 shifts per pixel. */
int i;
@@ -1287,8 +1284,8 @@
}
}
-void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
- int dest_stride, int bd) {
+void av1_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+ int dest_stride, int bd) {
int i;
tran_high_t a1, e1;
tran_low_t tmp[4];
@@ -1320,7 +1317,7 @@
}
}
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step[4];
tran_high_t temp1, temp2;
(void)bd;
@@ -1341,8 +1338,8 @@
output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
}
-void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
int i, j;
@@ -1351,7 +1348,7 @@
// Rows
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct4_c(input, outptr, bd);
+ av1_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
@@ -1359,7 +1356,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vp10_highbd_idct4_c(temp_in, temp_out, bd);
+ av1_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1367,8 +1364,8 @@
}
}
-void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
- int dest_stride, int bd) {
+void av1_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+ int dest_stride, int bd) {
int i;
tran_high_t a1;
tran_low_t out =
@@ -1387,7 +1384,7 @@
}
}
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
// stage 1
@@ -1405,7 +1402,7 @@
step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd);
// stage 2 & stage 3 - even half
- vp10_highbd_idct4_c(step1, step1, bd);
+ av1_highbd_idct4_c(step1, step1, bd);
// stage 2 - odd half
step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -1432,8 +1429,8 @@
output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
}
-void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
int i, j;
@@ -1442,7 +1439,7 @@
// First transform rows.
for (i = 0; i < 8; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -1450,7 +1447,7 @@
// Then transform columns.
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1458,8 +1455,8 @@
}
}
-void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
int i, j;
tran_high_t a1;
tran_low_t out =
@@ -1473,7 +1470,7 @@
}
}
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[0];
@@ -1511,7 +1508,7 @@
output[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3), bd);
}
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[7];
@@ -1588,8 +1585,8 @@
output[7] = HIGHBD_WRAPLOW(-x1, bd);
}
-void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -1599,14 +1596,14 @@
// First transform rows.
// Only the first 4 rows have non-zero coefficients.
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
// Then transform columns.
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1614,7 +1611,7 @@
}
}
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
(void)bd;
@@ -1780,8 +1777,8 @@
output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
}
-void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
int i, j;
@@ -1790,7 +1787,7 @@
// First transform rows.
for (i = 0; i < 16; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -1798,7 +1795,7 @@
// Then transform columns.
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1806,8 +1803,7 @@
}
}
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
- int bd) {
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -1977,8 +1973,8 @@
output[15] = HIGHBD_WRAPLOW(-x1, bd);
}
-void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -1988,7 +1984,7 @@
// First transform rows. Since all non-zero DCT coefficients are in the
// upper-left 4x4 area, we only need to calculate the first 4 rows here.
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -1996,7 +1992,7 @@
// Then transform columns.
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2004,8 +2000,8 @@
}
}
-void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
int i, j;
tran_high_t a1;
tran_low_t out =
@@ -2389,8 +2385,8 @@
output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
}
-void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[32 * 32];
tran_low_t *outptr = out;
int i, j;
@@ -2427,8 +2423,8 @@
}
}
-void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -2453,8 +2449,8 @@
}
}
-void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
int i, j;
int a1;
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -2469,4 +2465,4 @@
dest += stride;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
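
The *_add_c routines above all share one separable pattern: a 1-D transform over each row of coefficients, then over each column, with the column result rounded and clip-added into the prediction in dest. A minimal sketch of that pattern, assuming a 4x4 block and a hypothetical idct1d stand-in for av1_idct4_c (not the library's actual driver):

    #include <stdint.h>

    /* Sketch: separable 2-D inverse transform with round/clip add.
     * idct1d is a stand-in for a 1-D routine such as av1_idct4_c;
     * the rounding shift of 4 matches the 4x4 path above. */
    static void inv4x4_add_sketch(const int32_t *input, uint8_t *dest,
                                  int stride,
                                  void (*idct1d)(const int32_t *, int32_t *)) {
      int32_t out[4 * 4], temp_in[4], temp_out[4];
      int i, j;

      /* Rows: one 1-D transform per row of coefficients. */
      for (i = 0; i < 4; ++i) idct1d(input + i * 4, out + i * 4);

      /* Columns: gather, transform, then round, shift, and clip-add. */
      for (i = 0; i < 4; ++i) {
        for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
        idct1d(temp_in, temp_out);
        for (j = 0; j < 4; ++j) {
          int v = dest[j * stride + i] + ((temp_out[j] + 8) >> 4);
          dest[j * stride + i] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
      }
    }
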
diff --git a/av1/common/vp10_inv_txfm.h b/av1/common/av1_inv_txfm.h
similarity index 76%
rename from av1/common/vp10_inv_txfm.h
rename to av1/common/av1_inv_txfm.h
index b53db48..4295aa0 100644
--- a/av1/common/vp10_inv_txfm.h
+++ b/av1/common/av1_inv_txfm.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_INV_TXFM_H_
-#define VPX_DSP_INV_TXFM_H_
+#ifndef AOM_DSP_INV_TXFM_H_
+#define AOM_DSP_INV_TXFM_H_
#include <assert.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
@@ -41,7 +41,7 @@
return rv;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
// For valid highbitdepth streams, intermediate stage coefficients will
@@ -63,7 +63,7 @@
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
return rv;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EMULATE_HARDWARE
// When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -84,36 +84,36 @@
// bd of x uses trans_low with 8+x bits, need to remove 24-x bits
#define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) \
((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd))
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else // CONFIG_EMULATE_HARDWARE
#define WRAPLOW(x) ((int32_t)check_range(x))
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd))
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EMULATE_HARDWARE
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
int bd) {
@@ -129,4 +129,4 @@
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_INV_TXFM_H_
+#endif // AOM_DSP_INV_TXFM_H_
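
Under CONFIG_EMULATE_HARDWARE, the WRAPLOW macro above truncates an intermediate value to its low 16 bits with sign extension, mimicking a 16-bit hardware accumulator wrapping around. A small standalone check of the same arithmetic, written with an int16_t cast (equivalent, on two's-complement targets, to the ((x << 16) >> 16) idiom in the header):

    #include <stdint.h>
    #include <stdio.h>

    /* Keep the low 16 bits of x, sign-extended. */
    static int32_t wraplow16(int32_t x) { return (int16_t)x; }

    int main(void) {
      printf("%d\n", wraplow16(32767));  /* 32767: fits, unchanged */
      printf("%d\n", wraplow16(32768));  /* -32768: wraps like int16_t */
      printf("%d\n", wraplow16(-40000)); /* 25536: wraps the other way */
      return 0;
    }
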
diff --git a/av1/common/vp10_inv_txfm1d.c b/av1/common/av1_inv_txfm1d.c
similarity index 98%
rename from av1/common/vp10_inv_txfm1d.c
rename to av1/common/av1_inv_txfm1d.c
index 76fb623..dbb463f 100644
--- a/av1/common/vp10_inv_txfm1d.c
+++ b/av1/common/av1_inv_txfm1d.c
@@ -9,7 +9,7 @@
*/
#include <stdlib.h>
-#include "av1/common/vp10_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm1d.h"
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#define range_check(stage, input, buf, size, bit) \
{ \
@@ -40,8 +40,8 @@
}
#endif
-void vp10_idct4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range) {
const int32_t size = 4;
const int32_t *cospi;
@@ -83,8 +83,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_idct8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range) {
const int32_t size = 8;
const int32_t *cospi;
@@ -168,8 +168,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_idct16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 16;
const int32_t *cospi;
@@ -339,8 +339,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_idct32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 32;
const int32_t *cospi;
@@ -700,8 +700,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_iadst4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst4_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 4;
const int32_t *cospi;
@@ -765,8 +765,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_iadst8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst8_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 8;
const int32_t *cospi;
@@ -880,8 +880,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_iadst16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 16;
const int32_t *cospi;
@@ -1097,8 +1097,8 @@
range_check(stage, input, bf1, size, stage_range[stage]);
}
-void vp10_iadst32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int32_t size = 32;
const int32_t *cospi;
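
The renamed *_new 1-D transforms take per-stage cos_bit and stage_range arrays and, when CONFIG_COEFFICIENT_RANGE_CHECKING is on, verify every intermediate value after each butterfly stage. A simplified, assert-based sketch of that check (the file's real range_check macro also prints diagnostics):

    #include <assert.h>
    #include <stdint.h>

    /* Every value in buf must fit in `bit` signed bits. */
    static void range_check_sketch(const int32_t *buf, int size, int bit) {
      int i;
      for (i = 0; i < size; ++i) {
        assert(buf[i] >= -(1 << (bit - 1))); /* lower signed bound */
        assert(buf[i] < (1 << (bit - 1)));   /* upper signed bound */
      }
    }
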
diff --git a/av1/common/av1_inv_txfm1d.h b/av1/common/av1_inv_txfm1d.h
new file mode 100644
index 0000000..5937617
--- /dev/null
+++ b/av1/common/av1_inv_txfm1d.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AV1_INV_TXFM1D_H_
+#define AV1_INV_TXFM1D_H_
+
+#include "av1/common/av1_txfm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_idct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range);
+void av1_idct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+ const int8_t *stage_range);
+void av1_idct16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct64_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_iadst4_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst8_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst16_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst32_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AV1_INV_TXFM1D_H_
diff --git a/av1/common/vp10_inv_txfm2d.c b/av1/common/av1_inv_txfm2d.c
similarity index 79%
rename from av1/common/vp10_inv_txfm2d.c
rename to av1/common/av1_inv_txfm2d.c
index 60606c9..844a38a 100644
--- a/av1/common/vp10_inv_txfm2d.c
+++ b/av1/common/av1_inv_txfm2d.c
@@ -8,22 +8,22 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "av1/common/vp10_inv_txfm1d.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/av1_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
static INLINE TxfmFunc inv_txfm_type_to_func(TXFM_TYPE txfm_type) {
switch (txfm_type) {
- case TXFM_TYPE_DCT4: return vp10_idct4_new;
- case TXFM_TYPE_DCT8: return vp10_idct8_new;
- case TXFM_TYPE_DCT16: return vp10_idct16_new;
- case TXFM_TYPE_DCT32: return vp10_idct32_new;
- case TXFM_TYPE_ADST4: return vp10_iadst4_new;
- case TXFM_TYPE_ADST8: return vp10_iadst8_new;
- case TXFM_TYPE_ADST16: return vp10_iadst16_new;
- case TXFM_TYPE_ADST32: return vp10_iadst32_new;
+ case TXFM_TYPE_DCT4: return av1_idct4_new;
+ case TXFM_TYPE_DCT8: return av1_idct8_new;
+ case TXFM_TYPE_DCT16: return av1_idct16_new;
+ case TXFM_TYPE_DCT32: return av1_idct32_new;
+ case TXFM_TYPE_ADST4: return av1_iadst4_new;
+ case TXFM_TYPE_ADST8: return av1_iadst8_new;
+ case TXFM_TYPE_ADST16: return av1_iadst16_new;
+ case TXFM_TYPE_ADST32: return av1_iadst32_new;
default: assert(0); return NULL;
}
}
@@ -62,14 +62,14 @@
};
#endif
-TXFM_2D_FLIP_CFG vp10_get_inv_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size) {
TXFM_2D_FLIP_CFG cfg;
set_flip_cfg(tx_type, &cfg);
cfg.cfg = inv_txfm_cfg_ls[tx_type][tx_size];
return cfg;
}
-TXFM_2D_FLIP_CFG vp10_get_inv_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(int tx_type) {
TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL };
switch (tx_type) {
case DCT_DCT:
@@ -130,62 +130,62 @@
}
}
-void vp10_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
+ int stride, int tx_type, int bd) {
int txfm_buf[4 * 4 + 4 + 4];
// output contains the prediction signal, which is always positive and
// smaller than (1 << bd) - 1;
// since bd < 16 - 1, we can treat the uint16_t* output buffer as an
// int16_t*
- TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_4X4);
+ TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_4X4);
inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
clamp_block((int16_t *)output, 4, stride, 0, (1 << bd) - 1);
}
-void vp10_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
+ int stride, int tx_type, int bd) {
int txfm_buf[8 * 8 + 8 + 8];
// output contains the prediction signal, which is always positive and
// smaller than (1 << bd) - 1;
// since bd < 16 - 1, we can treat the uint16_t* output buffer as an
// int16_t*
- TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_8X8);
+ TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_8X8);
inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
clamp_block((int16_t *)output, 8, stride, 0, (1 << bd) - 1);
}
-void vp10_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
+ int stride, int tx_type, int bd) {
int txfm_buf[16 * 16 + 16 + 16];
// output contains the prediction signal, which is always positive and
// smaller than (1 << bd) - 1;
// since bd < 16 - 1, we can treat the uint16_t* output buffer as an
// int16_t*
- TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_16X16);
+ TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_16X16);
inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
clamp_block((int16_t *)output, 16, stride, 0, (1 << bd) - 1);
}
-void vp10_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
+ int stride, int tx_type, int bd) {
int txfm_buf[32 * 32 + 32 + 32];
// output contains the prediction signal, which is always positive and
// smaller than (1 << bd) - 1;
// since bd < 16 - 1, we can treat the uint16_t* output buffer as an
// int16_t*
- TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_32X32);
+ TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_32X32);
inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
clamp_block((int16_t *)output, 32, stride, 0, (1 << bd) - 1);
}
-void vp10_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
+ int stride, int tx_type, int bd) {
int txfm_buf[64 * 64 + 64 + 64];
// output contains the prediction signal, which is always positive and
// smaller than (1 << bd) - 1;
// since bd < 16 - 1, we can treat the uint16_t* output buffer as an
// int16_t*
- TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_64x64_cfg(tx_type);
+ TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_64x64_cfg(tx_type);
inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
clamp_block((int16_t *)output, 64, stride, 0, (1 << bd) - 1);
}
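
inv_txfm_type_to_func above is a plain table dispatch: each 1-D transform type maps to a routine with a uniform signature, so the shared 2-D driver never needs to know which transform it is running. A minimal sketch of the same pattern, reusing the prototypes this patch moves into av1/common/av1_inv_txfm1d.h (the enum tags here are illustrative; the library uses the TXFM_TYPE_* values):

    #include <stddef.h>
    #include <stdint.h>
    #include "av1/common/av1_inv_txfm1d.h"

    typedef void (*Txfm1dFunc)(const int32_t *input, int32_t *output,
                               const int8_t *cos_bit,
                               const int8_t *stage_range);

    enum { SKETCH_DCT4, SKETCH_ADST4 };

    static Txfm1dFunc pick_txfm_sketch(int type) {
      switch (type) {
        case SKETCH_DCT4: return av1_idct4_new;
        case SKETCH_ADST4: return av1_iadst4_new;
        default: return NULL; /* unknown type */
      }
    }
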
diff --git a/av1/common/vp10_inv_txfm2d_cfg.h b/av1/common/av1_inv_txfm2d_cfg.h
similarity index 99%
rename from av1/common/vp10_inv_txfm2d_cfg.h
rename to av1/common/av1_inv_txfm2d_cfg.h
index 9bfa420..ee018fb 100644
--- a/av1/common/vp10_inv_txfm2d_cfg.h
+++ b/av1/common/av1_inv_txfm2d_cfg.h
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_INV_TXFM2D_CFG_H_
-#define VP10_INV_TXFM2D_CFG_H_
-#include "av1/common/vp10_inv_txfm1d.h"
+#ifndef AV1_INV_TXFM2D_CFG_H_
+#define AV1_INV_TXFM2D_CFG_H_
+#include "av1/common/av1_inv_txfm1d.h"
// ---------------- config inv_dct_dct_4 ----------------
static const int8_t inv_shift_dct_dct_4[2] = { 0, -4 };
static const int8_t inv_stage_range_col_dct_dct_4[4] = { 18, 18, 17, 17 };
@@ -441,4 +441,4 @@
TXFM_TYPE_DCT32
}; // .txfm_type_row
-#endif // VP10_INV_TXFM2D_CFG_H_
+#endif // AV1_INV_TXFM2D_CFG_H_
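
The per-configuration shift arrays in this header (for example inv_shift_dct_dct_4 = { 0, -4 }) give one signed shift per pass: a negative entry is a rounding right shift applied after that pass, a positive entry a left shift. A sketch of how such an entry is typically applied, using a hypothetical helper rather than the library's own:

    #include <stdint.h>

    /* Apply a signed shift to every element: shift < 0 means a rounding
     * right shift by -shift, shift > 0 a plain left shift. */
    static void round_shift_array_sketch(int32_t *arr, int size, int shift) {
      int i;
      if (shift == 0) return;
      if (shift > 0) {
        for (i = 0; i < size; ++i) arr[i] <<= shift;
      } else {
        for (i = 0; i < size; ++i)
          arr[i] = (arr[i] + (1 << (-shift - 1))) >> -shift;
      }
    }
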
diff --git a/av1/common/vp10_rtcd.c b/av1/common/av1_rtcd.c
similarity index 85%
rename from av1/common/vp10_rtcd.c
rename to av1/common/av1_rtcd.c
index 7fce6b9..fad509c 100644
--- a/av1/common/vp10_rtcd.c
+++ b/av1/common/av1_rtcd.c
@@ -7,12 +7,12 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
#define RTCD_C
-#include "./vp10_rtcd.h"
-#include "aom_ports/vpx_once.h"
+#include "./av1_rtcd.h"
+#include "aom_ports/aom_once.h"
-void vp10_rtcd() {
+void av1_rtcd() {
// TODO(JBB): Remove this once() call, by ensuring that both the encoder and
// decoder setup functions are protected by once();
once(setup_rtcd_internal);
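
av1_rtcd() guards setup_rtcd_internal() with once() so the dispatch table is filled exactly one time even if the encoder and decoder both call it concurrently. The idiom, sketched here with the standard pthread_once() (aom_ports/aom_once.h supplies a portable equivalent; setup_pointers() stands in for setup_rtcd_internal()):

    #include <pthread.h>

    static void setup_pointers(void) {
      /* Select C vs. SIMD implementations here, exactly once. */
    }

    static pthread_once_t rtcd_once = PTHREAD_ONCE_INIT;

    void rtcd_sketch(void) { pthread_once(&rtcd_once, setup_pointers); }
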
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
new file mode 100644
index 0000000..c1b0f9e
--- /dev/null
+++ b/av1/common/av1_rtcd_defs.pl
@@ -0,0 +1,912 @@
+sub av1_common_forward_decls() {
+print <<EOF
+/*
+ * AV1
+ */
+
+#include "aom/aom_integer.h"
+#include "av1/common/common.h"
+#include "av1/common/enums.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/filter.h"
+#include "av1/common/av1_txfm.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct aom_variance_vtable;
+struct search_site_config;
+struct mv;
+union int_mv;
+struct yv12_buffer_config;
+EOF
+}
+forward_decls qw/av1_common_forward_decls/;
+
+# Functions that are 64-bit only.
+$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
+if ($opts{arch} eq "x86_64") {
+ $mmx_x86_64 = 'mmx';
+ $sse2_x86_64 = 'sse2';
+ $ssse3_x86_64 = 'ssse3';
+ $avx_x86_64 = 'avx';
+ $avx2_x86_64 = 'avx2';
+}
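+# (Editorial note) These variables expand to an ISA name only when the
+# target is x86_64, so a later line such as
+#   specialize qw/av1_highbd_convolve8/, "$sse2_x86_64";
+# adds the SSE2 variant on 64-bit builds and expands to nothing on
+# other targets.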
+
+#
+# 10/12-tap convolution filters
+#
+add_proto qw/void av1_convolve_horiz/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
+specialize qw/av1_convolve_horiz ssse3/;
+
+add_proto qw/void av1_convolve_vert/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
+specialize qw/av1_convolve_vert ssse3/;
+
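+# (Editorial sketch of this DSL) add_proto registers a function name and
+# its C signature with the RTCD generator; specialize lists the SIMD
+# flavors implemented for it. The generated header then declares one
+# symbol per flavor (for example av1_convolve_horiz_c and
+# av1_convolve_horiz_ssse3) plus a function pointer that
+# setup_rtcd_internal() resolves at run time to the best variant the
+# CPU supports.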
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void av1_highbd_convolve_horiz/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
+ specialize qw/av1_highbd_convolve_horiz sse4_1/;
+ add_proto qw/void av1_highbd_convolve_vert/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
+ specialize qw/av1_highbd_convolve_vert sse4_1/;
+}
+
+#
+# dct
+#
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note: as optimized versions of these functions are added, we need to add a check to ensure
+ # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+ if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add/;
+
+ add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x8_32_add/;
+
+ add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x4_32_add/;
+
+ add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x16_128_add/;
+
+ add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x8_128_add/;
+
+ add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x32_512_add/;
+
+ add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht32x16_512_add/;
+
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add/;
+
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add/;
+
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4/;
+
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1/;
+
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8/;
+
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1/;
+
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16/;
+
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1/;
+
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32/;
+
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd/;
+
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1/;
+
+ add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct4x4/;
+
+ add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8/;
+
+ add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8_1/;
+
+ add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16/;
+
+ add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16_1/;
+
+ add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32/;
+
+ add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_rd/;
+
+ add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_1/;
+ } else {
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add sse2/;
+
+ add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x8_32_add/;
+
+ add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x4_32_add/;
+
+ add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x16_128_add/;
+
+ add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x8_128_add/;
+
+ add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x32_512_add/;
+
+ add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht32x16_512_add/;
+
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add sse2/;
+
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add sse2/;
+
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4 sse2/;
+
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1 sse2/;
+
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8 sse2/;
+
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1 sse2/;
+
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16 sse2/;
+
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1 sse2/;
+
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32 sse2/;
+
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd sse2/;
+
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1 sse2/;
+
+ add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct4x4 sse2/;
+
+ add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8 sse2/;
+
+ add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8_1/;
+
+ add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16 sse2/;
+
+ add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16_1/;
+
+ add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32 sse2/;
+
+ add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_rd sse2/;
+
+ add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_1/;
+ }
+} else {
+ # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+ if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add/;
+
+ add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x8_32_add/;
+
+ add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x4_32_add/;
+
+ add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x16_128_add/;
+
+ add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x8_128_add/;
+
+ add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x32_512_add/;
+
+ add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht32x16_512_add/;
+
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add/;
+
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add/;
+
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4/;
+
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1/;
+
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8/;
+
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1/;
+
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16/;
+
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1/;
+
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32/;
+
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd/;
+
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1/;
+ } else {
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add sse2 neon dspr2/;
+
+ add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x8_32_add/;
+
+ add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x4_32_add/;
+
+ add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x16_128_add/;
+
+ add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x8_128_add/;
+
+ add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht16x32_512_add/;
+
+ add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht32x16_512_add/;
+
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add sse2 neon dspr2/;
+
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add sse2 dspr2/;
+
+ if (aom_config("CONFIG_EXT_TX") ne "yes") {
+ specialize qw/av1_iht4x4_16_add msa/;
+ specialize qw/av1_iht8x8_64_add msa/;
+ specialize qw/av1_iht16x16_256_add msa/;
+ }
+
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4 sse2/;
+
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1 sse2/;
+
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8 sse2/;
+
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1 sse2/;
+
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16 sse2/;
+
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1 sse2/;
+
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32 sse2/;
+
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd sse2/;
+
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1 sse2/;
+ }
+}
+
+if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
+ add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/quantize_nuq/;
+
+ add_proto qw/void quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/quantize_fp_nuq/;
+
+ add_proto qw/void quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/quantize_32x32_nuq/;
+
+ add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/quantize_32x32_fp_nuq/;
+}
+
+# EXT_INTRA predictor functions
+if (aom_config("CONFIG_EXT_INTRA") eq "yes") {
+ add_proto qw/void av1_dc_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_dc_filter_predictor sse4_1/;
+ add_proto qw/void av1_v_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_v_filter_predictor sse4_1/;
+ add_proto qw/void av1_h_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_h_filter_predictor sse4_1/;
+ add_proto qw/void av1_d45_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_d45_filter_predictor sse4_1/;
+ add_proto qw/void av1_d135_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_d135_filter_predictor sse4_1/;
+ add_proto qw/void av1_d117_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_d117_filter_predictor sse4_1/;
+ add_proto qw/void av1_d153_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_d153_filter_predictor sse4_1/;
+ add_proto qw/void av1_d207_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_d207_filter_predictor sse4_1/;
+ add_proto qw/void av1_d63_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_d63_filter_predictor sse4_1/;
+ add_proto qw/void av1_tm_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+ specialize qw/av1_tm_filter_predictor sse4_1/;
+ # High bitdepth functions
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void av1_highbd_dc_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_dc_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_v_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_v_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_h_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_h_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_d45_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_d45_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_d135_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_d135_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_d117_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_d117_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_d153_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_d153_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_d207_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_d207_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_d63_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_d63_filter_predictor sse4_1/;
+ add_proto qw/void av1_highbd_tm_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/av1_highbd_tm_filter_predictor sse4_1/;
+ }
+}
+
+# High bitdepth functions
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ #
+ # Sub Pixel Filters
+ #
+ add_proto qw/void av1_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve_copy/;
+
+ add_proto qw/void av1_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve_avg/;
+
+ add_proto qw/void av1_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8/, "$sse2_x86_64";
+
+ add_proto qw/void av1_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_horiz/, "$sse2_x86_64";
+
+ add_proto qw/void av1_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_vert/, "$sse2_x86_64";
+
+ add_proto qw/void av1_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_avg/, "$sse2_x86_64";
+
+ add_proto qw/void av1_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+
+ add_proto qw/void av1_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+
+ #
+ # dct
+ #
+  # Note: as optimized versions of these functions are added, we need to add a check to ensure
+ # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+ add_proto qw/void av1_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht4x4_16_add/;
+
+ add_proto qw/void av1_highbd_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht4x8_32_add/;
+
+ add_proto qw/void av1_highbd_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht8x4_32_add/;
+
+ add_proto qw/void av1_highbd_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht8x16_128_add/;
+
+ add_proto qw/void av1_highbd_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht16x8_128_add/;
+
+ add_proto qw/void av1_highbd_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht16x32_512_add/;
+
+ add_proto qw/void av1_highbd_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht32x16_512_add/;
+
+ add_proto qw/void av1_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht8x8_64_add/;
+
+ add_proto qw/void av1_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
+ specialize qw/av1_highbd_iht16x16_256_add/;
+}
+
+#
+# Encoder functions below this point.
+#
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+
+# ENCODEMB INVOKE
+
+if (aom_config("CONFIG_AOM_QM") eq "yes") {
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    # The transform coefficients are held in 32-bit
+ # values, so the assembler code for av1_block_error can no longer be used.
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error/;
+
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ specialize qw/av1_fdct8x8_quant/;
+ } else {
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
+
+ add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+ specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
+
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ }
+} else {
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    # The transform coefficients are held in 32-bit
+ # values, so the assembler code for av1_block_error can no longer be used.
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error/;
+
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp/;
+
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp_32x32/;
+
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_fdct8x8_quant/;
+ } else {
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error sse2 avx2 msa/;
+
+ add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+ specialize qw/av1_block_error_fp neon sse2/;
+
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp neon sse2/, "$ssse3_x86_64";
+
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp_32x32/, "$ssse3_x86_64";
+
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_fdct8x8_quant sse2 ssse3 neon/;
+ }
+
+}
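+# (Editorial note) In the CONFIG_AOM_HIGHBITDEPTH branches above,
+# tran_low_t widens to 32 bits, so av1_block_error keeps its C-only
+# specialization; the SSE2/AVX2/MSA variants assume 16-bit coefficients.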
+
+# fdct functions
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht4x4 sse2/;
+
+ add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht4x8/;
+
+ add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x4/;
+
+ add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x16/;
+
+ add_proto qw/void av1_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x8/;
+
+ add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x32/;
+
+ add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht32x16/;
+
+ add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x8 sse2/;
+
+ add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x16 sse2/;
+
+ add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht32x32/;
+
+ add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fwht4x4/;
+} else {
+ add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht4x4 sse2/;
+
+ add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht4x8/;
+
+ add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x4/;
+
+ add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x16/;
+
+ add_proto qw/void av1_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x8/;
+
+ add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x32/;
+
+ add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht32x16/;
+
+ add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x8 sse2/;
+
+ add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x16 sse2/;
+
+ if (aom_config("CONFIG_EXT_TX") ne "yes") {
+ specialize qw/av1_fht4x4 msa/;
+ specialize qw/av1_fht8x8 msa/;
+ specialize qw/av1_fht16x16 msa/;
+ }
+
+ add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht32x32/;
+
+ add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fwht4x4/;
+}
+
+add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
+specialize qw/av1_fwd_idtx/;
+
+# Inverse transform
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ # Note: as optimized versions of these functions are added, we need to add a
+ # check to ensure that when CONFIG_EMULATE_HARDWARE is on, they default to
+ # the C versions only.
+ add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_1_add/;
+
+ add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_16_add/;
+
+ add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_1_add/;
+
+ add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_64_add/;
+
+ add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_12_add/;
+
+ add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_1_add/;
+
+ add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_256_add/;
+
+ add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_10_add/;
+
+ add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1024_add/;
+
+ add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_34_add/;
+
+ add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1_add/;
+
+ add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_1_add/;
+
+ add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_16_add/;
+
+ add_proto qw/void av1_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct4x4_1_add/;
+
+ add_proto qw/void av1_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_1_add/;
+
+ add_proto qw/void av1_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_1_add/;
+
+ add_proto qw/void av1_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct32x32_1024_add/;
+
+ add_proto qw/void av1_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct32x32_34_add/;
+
+ add_proto qw/void av1_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct32x32_1_add/;
+
+ add_proto qw/void av1_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_iwht4x4_1_add/;
+
+ add_proto qw/void av1_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_iwht4x4_16_add/;
+
+ # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+ if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+ add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct4x4_16_add/;
+
+ add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_64_add/;
+
+ add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_10_add/;
+
+ add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_256_add/;
+
+ add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_10_add/;
+ } else {
+ add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct4x4_16_add sse2/;
+
+ add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_64_add sse2/;
+
+ add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_10_add sse2/;
+
+ add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_256_add sse2/;
+
+ add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_10_add sse2/;
+ } # CONFIG_EMULATE_HARDWARE
+} else {
+ # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+ if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+ add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_1_add/;
+
+ add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_16_add/;
+
+ add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_1_add/;
+
+ add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_64_add/;
+
+ add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_12_add/;
+
+ add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_1_add/;
+
+ add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_256_add/;
+
+ add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_10_add/;
+
+ add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1024_add/;
+
+ add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_34_add/;
+
+ add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1_add/;
+
+ add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_1_add/;
+
+ add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_16_add/;
+ } else {
+ add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_1_add sse2/;
+
+ add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_16_add sse2/;
+
+ add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_1_add sse2/;
+
+ add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_64_add sse2/;
+
+ add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_12_add sse2/;
+
+ add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_1_add sse2/;
+
+ add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_256_add sse2/;
+
+ add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_10_add sse2/;
+
+ add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1024_add sse2/;
+
+ add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_34_add sse2/;
+
+ add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1_add sse2/;
+
+ add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_1_add/;
+
+ add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_16_add/;
+ } # CONFIG_EMULATE_HARDWARE
+} # CONFIG_AOM_HIGHBITDEPTH
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ #fwd txfm
+ add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_fwd_txfm2d_4x4 sse4_1/;
+ add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_fwd_txfm2d_8x8 sse4_1/;
+ add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_fwd_txfm2d_16x16 sse4_1/;
+ add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
+ add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_fwd_txfm2d_64x64 sse4_1/;
+
+ #inv txfm
+ add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_inv_txfm2d_add_4x4 sse4_1/;
+ add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_inv_txfm2d_add_8x8 sse4_1/;
+ add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_inv_txfm2d_add_16x16 sse4_1/;
+ add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_inv_txfm2d_add_32x32/;
+ add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+ specialize qw/av1_inv_txfm2d_add_64x64/;
+}
+
+#
+# Motion search
+#
+add_proto qw/int av1_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
+specialize qw/av1_full_search_sad sse3 sse4_1/;
+$av1_full_search_sad_sse3=av1_full_search_sadx3;
+$av1_full_search_sad_sse4_1=av1_full_search_sadx8;
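+# The two assignments above override the rtcd generator's default
+# "<name>_<arch>" symbol naming: the optimized kernels are exported as
+# av1_full_search_sadx3 and av1_full_search_sadx8, so the sse3 and sse4_1
+# dispatch entries are pointed at those symbols instead.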
+
+add_proto qw/int av1_diamond_search_sad/, "struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_diamond_search_sad/;
+
+add_proto qw/int av1_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_full_range_search/;
+
+add_proto qw/void av1_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+specialize qw/av1_temporal_filter_apply sse2 msa/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+
+ # ENCODEMB INVOKE
+ if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
+ add_proto qw/void highbd_quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/highbd_quantize_nuq/;
+
+ add_proto qw/void highbd_quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/highbd_quantize_fp_nuq/;
+
+ add_proto qw/void highbd_quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/highbd_quantize_32x32_nuq/;
+
+ add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+ specialize qw/highbd_quantize_32x32_fp_nuq/;
+ }
+
+ add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
+ specialize qw/av1_highbd_block_error sse2/;
+
+ if (aom_config("CONFIG_AOM_QM") eq "yes") {
+ add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
+
+ add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
+ } else {
+ add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
+ specialize qw/av1_highbd_quantize_fp sse4_1/;
+
+ add_proto qw/void av1_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
+ specialize qw/av1_highbd_quantize_b/;
+ }
+
+ # fdct functions
+ add_proto qw/void av1_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht4x4 sse4_1/;
+
+ add_proto qw/void av1_highbd_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht4x8/;
+
+ add_proto qw/void av1_highbd_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht8x4/;
+
+ add_proto qw/void av1_highbd_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht8x16/;
+
+ add_proto qw/void av1_highbd_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht16x8/;
+
+ add_proto qw/void av1_highbd_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht16x32/;
+
+ add_proto qw/void av1_highbd_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht32x16/;
+
+ add_proto qw/void av1_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht8x8/;
+
+ add_proto qw/void av1_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht16x16/;
+
+ add_proto qw/void av1_highbd_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht32x32/;
+
+ add_proto qw/void av1_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fwht4x4/;
+
+ add_proto qw/void av1_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+ specialize qw/av1_highbd_temporal_filter_apply/;
+
+}
+# End av1_high encoder functions
+
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+ add_proto qw/uint64_t av1_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
+ specialize qw/av1_wedge_sse_from_residuals sse2/;
+ add_proto qw/int av1_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
+ specialize qw/av1_wedge_sign_from_residuals sse2/;
+ add_proto qw/void av1_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
+ specialize qw/av1_wedge_compute_delta_squares sse2/;
+}
+
+}
+# end encoder functions
+1;
diff --git a/av1/common/vp10_txfm.h b/av1/common/av1_txfm.h
similarity index 96%
rename from av1/common/vp10_txfm.h
rename to av1/common/av1_txfm.h
index bfeb3ea..289f953 100644
--- a/av1/common/vp10_txfm.h
+++ b/av1/common/av1_txfm.h
@@ -7,16 +7,16 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_TXFM_H_
-#define VP10_TXFM_H_
+#ifndef AV1_TXFM_H_
+#define AV1_TXFM_H_
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
static const int cos_bit_min = 10;
static const int cos_bit_max = 16;
@@ -198,10 +198,10 @@
#ifdef __cplusplus
extern "C" {
#endif
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_cfg(int tx_type, int tx_size);
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_64x64_cfg(int tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type);
#ifdef __cplusplus
}
#endif // __cplusplus
-#endif // VP10_TXFM_H_
+#endif // AV1_TXFM_H_
diff --git a/av1/common/blockd.c b/av1/common/blockd.c
index ee95271..b13e562 100644
--- a/av1/common/blockd.c
+++ b/av1/common/blockd.c
@@ -14,8 +14,8 @@
#include "av1/common/blockd.h"
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
- const MODE_INFO *left_mi, int b) {
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *left_mi, int b) {
if (b == 0 || b == 2) {
if (!left_mi || is_inter_block(&left_mi->mbmi)) return DC_PRED;
@@ -26,8 +26,8 @@
}
}
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
- const MODE_INFO *above_mi, int b) {
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *above_mi, int b) {
if (b == 0 || b == 1) {
if (!above_mi || is_inter_block(&above_mi->mbmi)) return DC_PRED;
@@ -38,7 +38,7 @@
}
}
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -81,18 +81,18 @@
}
}
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
- BLOCK_SIZE bsize,
- foreach_transformed_block_visitor visit,
- void *arg) {
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
+ BLOCK_SIZE bsize,
+ foreach_transformed_block_visitor visit,
+ void *arg) {
int plane;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
}
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
- int aoff, int loff) {
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+ int aoff, int loff) {
ENTROPY_CONTEXT *const a = pd->above_context + aoff;
ENTROPY_CONTEXT *const l = pd->left_context + loff;
const int tx_w_in_blocks = num_4x4_blocks_wide_txsize_lookup[tx_size];
@@ -128,7 +128,7 @@
}
}
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
@@ -151,7 +151,7 @@
// Returns whether filter selection is needed for a given
// intra prediction angle.
-int vp10_is_intra_filter_switchable(int angle) {
+int av1_is_intra_filter_switchable(int angle) {
assert(angle > 0 && angle < 270);
if (angle % 45 == 0) return 0;
if (angle > 90 && angle < 180) {
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 4dcc1f0..327a8d0 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_BLOCKD_H_
-#define VP10_COMMON_BLOCKD_H_
+#ifndef AV1_COMMON_BLOCKD_H_
+#define AV1_COMMON_BLOCKD_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"
@@ -39,7 +39,7 @@
} FRAME_TYPE;
#if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-#define IsInterpolatingFilter(filter) (vp10_is_interpolating_filter(filter))
+#define IsInterpolatingFilter(filter) (av1_is_interpolating_filter(filter))
#else
#define IsInterpolatingFilter(filter) (1)
#endif // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
@@ -158,11 +158,11 @@
// Number of base colors for Y (0) and UV (1)
uint8_t palette_size[2];
// Value of base colors for Y, U, and V
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t palette_colors[3 * PALETTE_MAX_SIZE];
#else
uint8_t palette_colors[3 * PALETTE_MAX_SIZE];
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Only used by encoder to store the color index of the top left pixel.
// TODO(huisu): move this to encoder
uint8_t palette_first_color_idx[2];
@@ -260,11 +260,11 @@
return mbmi->ref_frame[1] > INTRA_FRAME;
}
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
- const MODE_INFO *left_mi, int b);
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *left_mi, int b);
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
- const MODE_INFO *above_mi, int b);
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *above_mi, int b);
enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };
@@ -314,7 +314,7 @@
typedef struct RefBuffer {
// TODO(dkovalev): idx is not really required and should be removed, now it
- // is used in vp10_onyxd_if.c
+ // is used in av1_onyxd_if.c
int idx;
YV12_BUFFER_CONFIG *buf;
struct scale_factors sf;
@@ -339,7 +339,7 @@
int up_available;
int left_available;
- const vpx_prob (*partition_probs)[PARTITION_TYPES - 1];
+ const aom_prob (*partition_probs)[PARTITION_TYPES - 1];
/* Distance of MB away from frame edges */
int mb_to_left_edge;
@@ -381,7 +381,7 @@
uint8_t is_sec_rect;
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
/* Bit depth: 8, 10, 12 */
int bd;
#endif
@@ -389,7 +389,7 @@
int lossless[MAX_SEGMENTS];
int corrupted;
- struct vpx_internal_error_info *error_info;
+ struct aom_internal_error_info *error_info;
#if CONFIG_GLOBAL_MOTION
Global_Motion_Params *global_motion;
#endif // CONFIG_GLOBAL_MOTION
@@ -419,7 +419,7 @@
#if CONFIG_SUPERTX
static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) {
return (int)txsize_sqr_map[mbmi->tx_size] >
- VPXMIN(b_width_log2_lookup[mbmi->sb_type],
+ AOMMIN(b_width_log2_lookup[mbmi->sb_type],
b_height_log2_lookup[mbmi->sb_type]);
}
#endif // CONFIG_SUPERTX
@@ -567,7 +567,7 @@
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (!is_inter) {
- return VPXMIN(max_tx_size, largest_tx_size);
+ return AOMMIN(max_tx_size, largest_tx_size);
} else {
const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
if (txsize_sqr_up_map[max_rect_tx_size] <= largest_tx_size) {
@@ -578,7 +578,7 @@
}
#else
(void)is_inter;
- return VPXMIN(max_tx_size, largest_tx_size);
+ return AOMMIN(max_tx_size, largest_tx_size);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
}
@@ -606,7 +606,7 @@
ADST_ADST, // FILTER_TM
};
-int vp10_is_intra_filter_switchable(int angle);
+int av1_is_intra_filter_switchable(int angle);
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_TILE
@@ -718,7 +718,7 @@
#endif // CONFIG_EXT_TX
}
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
int xss, int yss) {
@@ -726,7 +726,7 @@
return TX_4X4;
} else {
const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss];
- return VPXMIN(txsize_sqr_map[y_tx_size], max_txsize_lookup[plane_bsize]);
+ return AOMMIN(txsize_sqr_map[y_tx_size], max_txsize_lookup[plane_bsize]);
}
}
@@ -763,18 +763,18 @@
BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, void *arg);
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg);
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
- BLOCK_SIZE bsize,
- foreach_transformed_block_visitor visit,
- void *arg);
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
+ BLOCK_SIZE bsize,
+ foreach_transformed_block_visitor visit,
+ void *arg);
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
- int aoff, int loff);
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+ int aoff, int loff);
#if CONFIG_EXT_INTER
static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
@@ -830,4 +830,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_BLOCKD_H_
+#endif // AV1_COMMON_BLOCKD_H_
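
The foreach helpers renamed above implement a plane-wise visitor pattern: av1_foreach_transformed_block loops over the MAX_MB_PLANE planes and delegates to av1_foreach_transformed_block_in_plane, which calls the supplied callback once per transform block, threading the opaque arg pointer through. Below is a minimal standalone sketch of the pattern; the simplified visitor signature is assumed for illustration, since the real typedef carries position arguments not shown in this hunk.

    #include <stdio.h>

    /* Simplified stand-ins; the real BLOCK_SIZE/TX_SIZE are enums and the
     * real visitor receives block-position arguments as well. */
    typedef int BLOCK_SIZE;
    typedef int TX_SIZE;
    typedef void (*foreach_visitor)(int plane, BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg);
    #define MAX_MB_PLANE 3

    /* Mirrors av1_foreach_transformed_block: loop the planes and hand each
     * one to the visitor (the per-plane block walk is elided here). */
    static void foreach_transformed_block(BLOCK_SIZE bsize,
                                          foreach_visitor visit, void *arg) {
      for (int plane = 0; plane < MAX_MB_PLANE; ++plane)
        visit(plane, bsize, 0, arg);
    }

    /* Example visitor: count invocations through the opaque arg pointer. */
    static void count_blocks(int plane, BLOCK_SIZE bsize, TX_SIZE tx_size,
                             void *arg) {
      (void)plane; (void)bsize; (void)tx_size;
      ++*(int *)arg;
    }

    int main(void) {
      int n = 0;
      foreach_transformed_block(0, count_blocks, &n);
      printf("visited %d plane walks\n", n); /* prints 3 */
      return 0;
    }
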
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index bba40cb..2309391 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -28,9 +28,9 @@
#define BS (MI_SIZE * MAX_MIB_SIZE)
// Iterate over blocks within a superblock
-static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
- const VP10_COMMON *cm, MACROBLOCKD *xd,
- MODE_INFO *const *mi_8x8, int xpos, int ypos) {
+static void av1_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
+ const AV1_COMMON *cm, MACROBLOCKD *xd,
+ MODE_INFO *const *mi_8x8, int xpos, int ypos) {
// Temporary buffer (to allow SIMD parallelism)
uint8_t buf_unaligned[BS * BS + 15];
uint8_t *buf = (uint8_t *)(((intptr_t)buf_unaligned + 15) & ~15);
@@ -56,7 +56,7 @@
has_bottom &= y != MAX_MIB_SIZE - 1;
has_right &= x != MAX_MIB_SIZE - 1;
#endif
- vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+ av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
clpf_block(
xd->plane[p].dst.buf, CLPF_ALLOW_PIXEL_PARALLELISM
? buf + y * MI_SIZE * BS + x * MI_SIZE
@@ -74,7 +74,7 @@
for (x = 0; x < MAX_MIB_SIZE && xpos + x < cm->mi_cols; x++) {
const MB_MODE_INFO *mbmi =
&mi_8x8[(ypos + y) * cm->mi_stride + xpos + x]->mbmi;
- vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+ av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
if (!mbmi->skip) {
int i = 0;
          for (i = 0; i < MI_SIZE >> xd->plane[p].subsampling_y; i++)
@@ -89,11 +89,11 @@
}
// Iterate over the superblocks of an entire frame
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
- MACROBLOCKD *xd) {
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
+ MACROBLOCKD *xd) {
int x, y;
for (y = 0; y < cm->mi_rows; y += MAX_MIB_SIZE)
for (x = 0; x < cm->mi_cols; x += MAX_MIB_SIZE)
- vp10_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
+ av1_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
}
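
The buf_unaligned/buf pair in av1_clpf_sb above uses a common manual-alignment idiom: over-allocate by 15 bytes, then round the pointer up to the next 16-byte boundary so SIMD loads and stores on the temporary block are aligned. A standalone sketch of just that idiom:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      /* Same trick as the buf_unaligned/buf pair above: over-allocate by 15
       * bytes, then round the pointer up to the next multiple of 16. */
      uint8_t buf_unaligned[64 + 15];
      uint8_t *buf = (uint8_t *)(((intptr_t)buf_unaligned + 15) & ~15);
      assert(((intptr_t)buf & 15) == 0);                        /* aligned */
      assert(buf >= buf_unaligned && buf < buf_unaligned + 16); /* in range */
      return 0;
    }
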
diff --git a/av1/common/clpf.h b/av1/common/clpf.h
index 5b9d55b..85f29d9 100644
--- a/av1/common/clpf.h
+++ b/av1/common/clpf.h
@@ -3,8 +3,8 @@
(Replace with proper AOM header)
*/
-#ifndef VP10_COMMON_CLPF_H_
-#define VP10_COMMON_CLPF_H_
+#ifndef AV1_COMMON_CLPF_H_
+#define AV1_COMMON_CLPF_H_
#include "av1/common/reconinter.h"
@@ -16,7 +16,7 @@
#define CLPF_FILTER_ALL_PLANES \
0 // 1 = filter both luma and chroma, 0 = filter only luma
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
- MACROBLOCKD *xd);
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
+ MACROBLOCKD *xd);
#endif
diff --git a/av1/common/common.h b/av1/common/common.h
index 4e30034..c333a17 100644
--- a/av1/common/common.h
+++ b/av1/common/common.h
@@ -8,17 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_COMMON_H_
-#define VP10_COMMON_COMMON_H_
+#ifndef AV1_COMMON_COMMON_H_
+#define AV1_COMMON_COMMON_H_
/* Interface header for common constant data structures and lookup tables */
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_integer.h"
#include "aom_ports/bitops.h"
#ifdef __cplusplus
@@ -28,21 +28,21 @@
#define PI 3.141592653589793238462643383279502884
// Only needed for fixed-size arrays; for structs, just assign.
-#define vp10_copy(dest, src) \
+#define av1_copy(dest, src) \
{ \
assert(sizeof(dest) == sizeof(src)); \
memcpy(dest, src, sizeof(src)); \
}
// Use this for variably-sized arrays.
-#define vp10_copy_array(dest, src, n) \
+#define av1_copy_array(dest, src, n) \
{ \
assert(sizeof(*(dest)) == sizeof(*(src))); \
memcpy(dest, src, n * sizeof(*(src))); \
}
-#define vp10_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp10_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
+#define av1_zero(dest) memset(&(dest), 0, sizeof(dest))
+#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
static INLINE int get_unsigned_bits(unsigned int num_values) {
return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -53,7 +53,7 @@
do { \
lval = (expr); \
if (!lval) \
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
} while (0)
@@ -62,19 +62,19 @@
do { \
lval = (expr); \
if (!lval) \
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
#endif
// TODO(yaowu): validate the usage of these codes or develop new ones.
-#define VP10_SYNC_CODE_0 0x49
-#define VP10_SYNC_CODE_1 0x83
-#define VP10_SYNC_CODE_2 0x43
+#define AV1_SYNC_CODE_0 0x49
+#define AV1_SYNC_CODE_1 0x83
+#define AV1_SYNC_CODE_2 0x43
-#define VPX_FRAME_MARKER 0x2
+#define AOM_FRAME_MARKER 0x2
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_COMMON_H_
+#endif // AV1_COMMON_COMMON_H_
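
The renamed av1_copy and av1_zero macros are thin wrappers over memcpy and memset; note that their sizeof checks only behave as intended when both arguments are true arrays rather than pointers. A small usage sketch under that assumption:

    #include <assert.h>
    #include <string.h>

    /* The macros, as defined in the hunk above. */
    #define av1_copy(dest, src)              \
      {                                      \
        assert(sizeof(dest) == sizeof(src)); \
        memcpy(dest, src, sizeof(src));      \
      }
    #define av1_zero(dest) memset(&(dest), 0, sizeof(dest))

    int main(void) {
      int a[4] = { 1, 2, 3, 4 }, b[4];
      av1_copy(b, a); /* sizeof sees the full arrays, so sizes are checked */
      av1_zero(a);    /* a is now all zeros */
      return b[3] - 4 + a[0]; /* returns 0 */
    }
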
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index 4348f08..1fdabfa 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_COMMON_DATA_H_
-#define VP10_COMMON_COMMON_DATA_H_
+#ifndef AV1_COMMON_COMMON_DATA_H_
+#define AV1_COMMON_COMMON_DATA_H_
#include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
#ifdef __cplusplus
extern "C" {
@@ -98,7 +98,7 @@
#endif // CONFIG_EXT_TX
};
-// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize)))
+// AOMMIN(3, AOMMIN(b_width_log2(bsize), b_height_log2(bsize)))
static const uint8_t size_group_lookup[BLOCK_SIZES] = {
0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, IF_EXT_PARTITION(3, 3, 3)
};
@@ -583,4 +583,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_COMMON_DATA_H_
+#endif // AV1_COMMON_COMMON_DATA_H_
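
The comment above size_group_lookup gives its generating formula. Assuming the usual BLOCK_SIZES ordering (4x4 through 64x64, with the EXT_PARTITION entries omitted) and b_width_log2/b_height_log2 measuring the dimension in 4-sample units, the table can be re-derived as a quick check; those assumptions are spelled out in the comments.

    #include <assert.h>

    /* log2 of a block dimension in 4-sample units: 4 -> 0, 8 -> 1, ... */
    static int dim_log2(int v) {
      int n = 0;
      while (v > 4) { v >>= 1; ++n; }
      return n;
    }

    int main(void) {
      /* Assumed BLOCK_SIZES order, 4x4 .. 64x64 (EXT_PARTITION omitted). */
      const int w[13] = { 4, 4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64 };
      const int h[13] = { 4, 8, 4, 8, 16, 8, 16, 32, 16, 32, 64, 32, 64 };
      const int size_group_lookup[13] = { 0, 0, 0, 1, 1, 1, 2,
                                          2, 2, 3, 3, 3, 3 };
      for (int i = 0; i < 13; ++i) {
        /* AOMMIN(3, AOMMIN(b_width_log2(bsize), b_height_log2(bsize))) */
        int g = dim_log2(w[i]) < dim_log2(h[i]) ? dim_log2(w[i])
                                                : dim_log2(h[i]);
        if (g > 3) g = 3;
        assert(g == size_group_lookup[i]);
      }
      return 0;
    }
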
diff --git a/av1/common/debugmodes.c b/av1/common/debugmodes.c
index 6c958a8..d4169fd 100644
--- a/av1/common/debugmodes.c
+++ b/av1/common/debugmodes.c
@@ -13,7 +13,7 @@
#include "av1/common/blockd.h"
#include "av1/common/onyxc_int.h"
-static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) {
+static void log_frame_info(AV1_COMMON *cm, const char *str, FILE *f) {
fprintf(f, "%s", str);
fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame,
cm->show_frame, cm->base_qindex);
@@ -22,7 +22,7 @@
* and uses the passed in member offset to print out the value of an integer
* for each mbmi member value in the mi structure.
*/
-static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
+static void print_mi_data(AV1_COMMON *cm, FILE *file, const char *descriptor,
size_t member_offset) {
int mi_row, mi_col;
MODE_INFO **mi = cm->mi_grid_visible;
@@ -43,7 +43,7 @@
fprintf(file, "\n");
}
-void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) {
+void av1_print_modes_and_motion_vectors(AV1_COMMON *cm, const char *file) {
int mi_row;
int mi_col;
FILE *mvs = fopen(file, "a");
diff --git a/av1/common/dering.c b/av1/common/dering.c
index 7c116a2..97b31af 100644
--- a/av1/common/dering.c
+++ b/av1/common/dering.c
@@ -11,8 +11,8 @@
#include <string.h>
#include <math.h>
-#include "./vpx_scale_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_scale_rtcd.h"
+#include "aom/aom_integer.h"
#include "av1/common/dering.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/reconinter.h"
@@ -26,7 +26,7 @@
return clamp(level, gi, MAX_DERING_LEVEL - 1);
}
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col) {
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col) {
int r, c;
int maxc, maxr;
int skip = 1;
@@ -44,8 +44,8 @@
return skip;
}
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
- MACROBLOCKD *xd, int global_level) {
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+ MACROBLOCKD *xd, int global_level) {
int r, c;
int sbr, sbc;
int nhsb, nvsb;
@@ -56,21 +56,21 @@
int bsize[3];
int dec[3];
int pli;
- int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
+ int coeff_shift = AOMMAX(cm->bit_depth - 8, 0);
nvsb = (cm->mi_rows + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
nhsb = (cm->mi_cols + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
- bskip = vpx_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+ bskip = aom_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
+ av1_setup_dst_planes(xd->plane, frame, 0, 0);
for (pli = 0; pli < 3; pli++) {
dec[pli] = xd->plane[pli].subsampling_x;
bsize[pli] = 8 >> dec[pli];
}
stride = bsize[0] * cm->mi_cols;
for (pli = 0; pli < 3; pli++) {
- src[pli] = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
+ src[pli] = aom_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
for (r = 0; r < bsize[pli] * cm->mi_rows; ++r) {
for (c = 0; c < bsize[pli] * cm->mi_cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
src[pli][r * stride + c] = CONVERT_TO_SHORTPTR(
xd->plane[pli].dst.buf)[r * xd->plane[pli].dst.stride + c];
@@ -78,7 +78,7 @@
#endif
src[pli][r * stride + c] =
xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
}
@@ -95,8 +95,8 @@
for (sbc = 0; sbc < nhsb; sbc++) {
int level;
int nhb, nvb;
- nhb = VPXMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
- nvb = VPXMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
+ nhb = AOMMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
+ nvb = AOMMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
for (pli = 0; pli < 3; pli++) {
int16_t dst[MAX_MIB_SIZE * MAX_MIB_SIZE * 8 * 8];
int threshold;
@@ -123,7 +123,7 @@
coeff_shift);
for (r = 0; r < bsize[pli] * nvb; ++r) {
for (c = 0; c < bsize[pli] * nhb; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
[xd->plane[pli].dst.stride *
@@ -136,7 +136,7 @@
(bsize[pli] * MAX_MIB_SIZE * sbr + r) +
sbc * bsize[pli] * MAX_MIB_SIZE + c] =
dst[r * MAX_MIB_SIZE * bsize[pli] + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
}
@@ -145,7 +145,7 @@
}
}
for (pli = 0; pli < 3; pli++) {
- vpx_free(src[pli]);
+ aom_free(src[pli]);
}
- vpx_free(bskip);
+ aom_free(bskip);
}
diff --git a/av1/common/dering.h b/av1/common/dering.h
index de59c86..254d34f 100644
--- a/av1/common/dering.h
+++ b/av1/common/dering.h
@@ -1,10 +1,10 @@
-#ifndef VP10_COMMON_DERING_H_
-#define VP10_COMMON_DERING_H_
+#ifndef AV1_COMMON_DERING_H_
+#define AV1_COMMON_DERING_H_
#include "av1/common/od_dering.h"
#include "av1/common/onyxc_int.h"
-#include "aom/vpx_integer.h"
-#include "./vpx_config.h"
+#include "aom/aom_integer.h"
+#include "./aom_config.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
@@ -19,14 +19,14 @@
#define DERING_REFINEMENT_LEVELS 4
int compute_level_from_index(int global_level, int gi);
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col);
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
- MACROBLOCKD *xd, int global_level);
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col);
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+ MACROBLOCKD *xd, int global_level);
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
- VP10_COMMON *cm, MACROBLOCKD *xd);
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+ AV1_COMMON *cm, MACROBLOCKD *xd);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_DERING_H_
+#endif // AV1_COMMON_DERING_H_
diff --git a/av1/common/divide.c b/av1/common/divide.c
index f0c6730..3c82be8 100644
--- a/av1/common/divide.c
+++ b/av1/common/divide.c
@@ -25,7 +25,7 @@
}
}
*/
-const struct fastdiv_elem vp10_fastdiv_tab[256] = {
+const struct fastdiv_elem av1_fastdiv_tab[256] = {
{ 0, 0 }, { 0, 0 }, { 0, 1 },
{ 1431655766, 2 }, { 0, 2 }, { 2576980378u, 3 },
{ 1431655766, 3 }, { 613566757, 3 }, { 0, 3 },
diff --git a/av1/common/divide.h b/av1/common/divide.h
index 7de6c91..b96ad4c 100644
--- a/av1/common/divide.h
+++ b/av1/common/divide.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_DIVIDE_H_
-#define VP10_COMMON_DIVIDE_H_
+#ifndef AV1_COMMON_DIVIDE_H_
+#define AV1_COMMON_DIVIDE_H_
// An implementation of the divide-by-multiply algorithm
// https://gmplib.org/~tege/divcnst-pldi94.pdf
#include <limits.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -27,14 +27,14 @@
unsigned shift;
};
-extern const struct fastdiv_elem vp10_fastdiv_tab[256];
+extern const struct fastdiv_elem av1_fastdiv_tab[256];
static INLINE unsigned fastdiv(unsigned x, int y) {
unsigned t =
- ((uint64_t)x * vp10_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
- return (t + x) >> vp10_fastdiv_tab[y].shift;
+ ((uint64_t)x * av1_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
+ return (t + x) >> av1_fastdiv_tab[y].shift;
}
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
-#endif // VP10_COMMON_DIVIDE_H_
+#endif // AV1_COMMON_DIVIDE_H_
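
fastdiv implements the Granlund-Montgomery divide-by-multiply method cited above: av1_fastdiv_tab pairs each divisor y with a magic multiplier and shift, and the division becomes a 64-bit multiply, an add, and a shift. A self-contained check using the first few table entries visible in the divide.c hunk:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fastdiv_elem {
      unsigned mult;
      unsigned shift;
    };

    /* First entries of av1_fastdiv_tab, indexed by the divisor y. */
    static const struct fastdiv_elem tab[] = {
      { 0, 0 }, { 0, 0 }, { 0, 1 }, { 1431655766, 2 }, { 0, 2 },
    };

    static unsigned fastdiv(unsigned x, int y) {
      unsigned t = ((uint64_t)x * tab[y].mult) >> (sizeof(x) * CHAR_BIT);
      return (t + x) >> tab[y].shift;
    }

    int main(void) {
      /* For y = 3: mult = 1431655766 is 2^32/3 rounded up, so the high 32
       * bits t of x * mult are roughly x/3, and (t + x) >> 2 evaluates
       * (x/3 + x) / 4 = x/3; it matches integer division on the values
       * checked here. */
      assert(fastdiv(9, 3) == 3);
      assert(fastdiv(10, 3) == 3);
      assert(fastdiv(1000000, 3) == 333333);
      assert(fastdiv(7, 2) == 3); /* y = 2: mult is 0, so just (0 + 7) >> 1 */
      printf("fastdiv checks pass\n");
      return 0;
    }
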
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index be96c42..83f8f65 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -12,12 +12,12 @@
#include "av1/common/blockd.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/entropymode.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_integer.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_integer.h"
// Unconstrained Node Tree
/* clang-format off */
-const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
2, 6, // 0 = LOW_VAL
-TWO_TOKEN, 4, // 1 = TWO
-THREE_TOKEN, -FOUR_TOKEN, // 2 = THREE
@@ -29,30 +29,30 @@
};
/* clang-format on */
-const vpx_prob vp10_cat1_prob[] = { 159 };
-const vpx_prob vp10_cat2_prob[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
- 196, 177, 153, 140, 133, 130, 129 };
-#if CONFIG_VP9_HIGHBITDEPTH
-const vpx_prob vp10_cat1_prob_high10[] = { 159 };
-const vpx_prob vp10_cat2_prob_high10[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob_high10[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high10[] = {
+const aom_prob av1_cat1_prob[] = { 159 };
+const aom_prob av1_cat2_prob[] = { 165, 145 };
+const aom_prob av1_cat3_prob[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
+ 196, 177, 153, 140, 133, 130, 129 };
+#if CONFIG_AOM_HIGHBITDEPTH
+const aom_prob av1_cat1_prob_high10[] = { 159 };
+const aom_prob av1_cat2_prob_high10[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high10[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high10[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high10[] = {
255, 255, 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
};
-const vpx_prob vp10_cat1_prob_high12[] = { 159 };
-const vpx_prob vp10_cat2_prob_high12[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob_high12[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
- 254, 252, 249, 243, 230, 196,
- 177, 153, 140, 133, 130, 129 };
+const aom_prob av1_cat1_prob_high12[] = { 159 };
+const aom_prob av1_cat2_prob_high12[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high12[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high12[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
+ 254, 252, 249, 243, 230, 196,
+ 177, 153, 140, 133, 130, 129 };
#endif
const uint16_t band_count_table[TX_SIZES_ALL][8] = {
@@ -75,7 +75,7 @@
#endif // CONFIG_EXT_TX
};
-const uint8_t vp10_coefband_trans_8x8plus[1024] = {
+const uint8_t av1_coefband_trans_8x8plus[1024] = {
0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
// beyond MAXBAND_INDEX+1 all values are filled as 5
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -120,18 +120,18 @@
};
#if CONFIG_EXT_TX
-const uint8_t vp10_coefband_trans_4x8_8x4[32] = {
+const uint8_t av1_coefband_trans_4x8_8x4[32] = {
0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
};
#endif // CONFIG_EXT_TX
-const uint8_t vp10_coefband_trans_4x4[16] = {
+const uint8_t av1_coefband_trans_4x4[16] = {
0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
};
-const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
- 4, 5, 5, 5, 5, 5 };
+const uint8_t av1_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
+ 4, 5, 5, 5, 5, 5 };
// Model obtained from a 2-sided zero-centered distribution derived
// from a Pareto distribution. The cdf of the distribution is:
@@ -145,9 +145,9 @@
// Every odd line in this table can be generated from the even lines
// by averaging :
-// vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] +
-// vp10_pareto8_full[l+1][node] ) >> 1;
-const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
+// av1_pareto8_full[l][node] = (av1_pareto8_full[l-1][node] +
+// av1_pareto8_full[l+1][node] ) >> 1;
+const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
{ 3, 86, 128, 6, 86, 23, 88, 29 },
{ 6, 86, 128, 11, 87, 42, 91, 52 },
{ 9, 86, 129, 17, 88, 61, 94, 76 },
@@ -417,7 +417,7 @@
// beta = 8
// Values for tokens ONE_TOKEN through CATEGORY6_TOKEN included here.
// ZERO_TOKEN and EOB_TOKEN are coded as flags outside this coder.
-const AnsP10 vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2] = {
+const AnsP10 av1_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2] = {
{ 4, 4, 4, 4, 8, 15, 30, 57, 103, 795 },
{ 8, 8, 8, 8, 15, 30, 57, 103, 168, 619 },
{ 12, 12, 12, 12, 23, 43, 80, 138, 205, 487 },
@@ -678,7 +678,7 @@
/* clang-format off */
#if CONFIG_ENTROPY
-const vp10_coeff_probs_model
+const av1_coeff_probs_model
default_qctx_coef_probs[QCTX_BINS][TX_SIZES][PLANE_TYPES] = {
{ // Q_Index 0
{ // TX_SIZE 0
@@ -2450,7 +2450,7 @@
},
};
#else
-static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -2534,7 +2534,7 @@
}
};
-static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -2618,7 +2618,7 @@
}
};
-static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -2702,7 +2702,7 @@
}
};
-static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -2788,30 +2788,30 @@
#endif // CONFIG_ENTROPY
/* clang-format on */
-static void extend_to_full_distribution(vpx_prob *probs, vpx_prob p) {
+static void extend_to_full_distribution(aom_prob *probs, aom_prob p) {
assert(p != 0);
- memcpy(probs, vp10_pareto8_full[p - 1], MODEL_NODES * sizeof(vpx_prob));
+ memcpy(probs, av1_pareto8_full[p - 1], MODEL_NODES * sizeof(aom_prob));
}
-void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full) {
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full) {
if (full != model)
- memcpy(full, model, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+ memcpy(full, model, sizeof(aom_prob) * UNCONSTRAINED_NODES);
extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
}
#if CONFIG_ANS
-void vp10_build_token_cdfs(const vpx_prob *pdf_model, rans_dec_lut cdf) {
+void av1_build_token_cdfs(const aom_prob *pdf_model, rans_dec_lut cdf) {
AnsP10 pdf_tab[ENTROPY_TOKENS - 1];
assert(pdf_model[2] != 0);
// TODO(aconverse): Investigate making the precision of the zero and EOB tree
// nodes 10-bits.
rans_merge_prob8_pdf(pdf_tab, pdf_model[1],
- vp10_pareto8_token_probs[pdf_model[2] - 1],
+ av1_pareto8_token_probs[pdf_model[2] - 1],
ENTROPY_TOKENS - 2);
rans_build_cdf_from_pdf(pdf_tab, cdf);
}
-void vp10_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
+void av1_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
TX_SIZE t;
int i, j, k, l;
for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -2819,42 +2819,42 @@
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
- vp10_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
- fc->coef_cdfs[t][i][j][k][l]);
+ av1_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
+ fc->coef_cdfs[t][i][j][k][l]);
}
#endif // CONFIG_ANS
-void vp10_default_coef_probs(VP10_COMMON *cm) {
+void av1_default_coef_probs(AV1_COMMON *cm) {
#if CONFIG_ENTROPY
- const int index = VPXMIN(
+ const int index = AOMMIN(
ROUND_POWER_OF_TWO(cm->base_qindex, 8 - QCTX_BIN_BITS), QCTX_BINS - 1);
- vp10_copy(cm->fc->coef_probs, default_qctx_coef_probs[index]);
+ av1_copy(cm->fc->coef_probs, default_qctx_coef_probs[index]);
#else
- vp10_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
- vp10_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
- vp10_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
- vp10_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
+ av1_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
+ av1_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
+ av1_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
+ av1_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
#endif // CONFIG_ENTROPY
#if CONFIG_ANS
- vp10_coef_pareto_cdfs(cm->fc);
+ av1_coef_pareto_cdfs(cm->fc);
#endif // CONFIG_ANS
}
-static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
+static void adapt_coef_probs(AV1_COMMON *cm, TX_SIZE tx_size,
unsigned int count_sat,
unsigned int update_factor) {
const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
- vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
+ av1_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
#if CONFIG_ENTROPY
- const vp10_coeff_probs_model *const pre_probs =
+ const av1_coeff_probs_model *const pre_probs =
cm->partial_prob_update
- ? (const vp10_coeff_probs_model *)cm->starting_coef_probs[tx_size]
+ ? (const av1_coeff_probs_model *)cm->starting_coef_probs[tx_size]
: pre_fc->coef_probs[tx_size];
#else
- const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
+ const av1_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
#endif // CONFIG_ENTROPY
- const vp10_coeff_count_model *const counts =
- (const vp10_coeff_count_model *)cm->counts.coef[tx_size];
+ const av1_coeff_count_model *const counts =
+ (const av1_coeff_count_model *)cm->counts.coef[tx_size];
  const unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      (const unsigned int(*)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS])
          cm->counts.eob_branch[tx_size];
@@ -2873,12 +2873,12 @@
};
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
probs[i][j][k][l][m] =
- vp10_merge_probs(pre_probs[i][j][k][l][m], branch_ct[m],
- count_sat, update_factor);
+ av1_merge_probs(pre_probs[i][j][k][l][m], branch_ct[m],
+ count_sat, update_factor);
}
}
-void vp10_adapt_coef_probs(VP10_COMMON *cm) {
+void av1_adapt_coef_probs(AV1_COMMON *cm) {
TX_SIZE t;
unsigned int count_sat, update_factor;
@@ -2905,18 +2905,18 @@
for (t = TX_4X4; t <= TX_32X32; t++)
adapt_coef_probs(cm, t, count_sat, update_factor);
#if CONFIG_ANS
- vp10_coef_pareto_cdfs(cm->fc);
+ av1_coef_pareto_cdfs(cm->fc);
#endif
}
#if CONFIG_ENTROPY
-void vp10_partial_adapt_probs(VP10_COMMON *cm, int mi_row, int mi_col) {
+void av1_partial_adapt_probs(AV1_COMMON *cm, int mi_row, int mi_col) {
(void)mi_row;
(void)mi_col;
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
cm->partial_prob_update = 1;
- vp10_adapt_coef_probs(cm);
+ av1_adapt_coef_probs(cm);
}
}
#endif // CONFIG_ENTROPY
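
The comment above av1_pareto8_full notes that every odd row of the table is the rounded-down average of its even neighbours. That property can be verified directly against the three rows visible in this hunk:

    #include <assert.h>

    int main(void) {
      /* The first three rows of av1_pareto8_full shown in the hunk above
       * (MODEL_NODES == 8). */
      const int row0[8] = { 3, 86, 128,  6, 86, 23, 88, 29 };
      const int row1[8] = { 6, 86, 128, 11, 87, 42, 91, 52 };
      const int row2[8] = { 9, 86, 129, 17, 88, 61, 94, 76 };
      /* av1_pareto8_full[l][node] == (..[l-1][node] + ..[l+1][node]) >> 1 */
      for (int node = 0; node < 8; ++node)
        assert(row1[node] == ((row0[node] + row2[node]) >> 1));
      return 0;
    }
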
diff --git a/av1/common/entropy.h b/av1/common/entropy.h
index b0afd46..63b4edd 100644
--- a/av1/common/entropy.h
+++ b/av1/common/entropy.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ENTROPY_H_
-#define VP10_COMMON_ENTROPY_H_
+#ifndef AV1_COMMON_ENTROPY_H_
+#define AV1_COMMON_ENTROPY_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/prob.h"
#if CONFIG_ANS
@@ -51,7 +51,7 @@
#define ENTROPY_NODES 11
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_pt_energy_class[ENTROPY_TOKENS]);
#define CAT1_MIN_VAL 5
#define CAT2_MIN_VAL 7
@@ -61,50 +61,50 @@
#define CAT6_MIN_VAL 67
// Extra bit probabilities.
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob[14]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob[14]);
-#if CONFIG_VP9_HIGHBITDEPTH
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high10[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high10[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high10[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high10[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high10[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high10[16]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high12[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high12[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high12[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high12[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high12[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high12[18]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high10[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high10[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high10[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high10[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high10[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high10[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high12[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high12[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high12[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high12[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high12[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high12[18]);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#define EOB_MODEL_TOKEN 3
typedef struct {
- const vpx_tree_index *tree;
- const vpx_prob *prob;
+ const aom_tree_index *tree;
+ const aom_prob *prob;
int len;
int base_val;
const int16_t *cost;
-} vp10_extra_bit;
+} av1_extra_bit;
// indexed by token value
-extern const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS];
-#if CONFIG_VP9_HIGHBITDEPTH
-extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS];
-extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
-#endif // CONFIG_VP9_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS];
+#if CONFIG_AOM_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS];
+extern const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS];
+#endif // CONFIG_AOM_HIGHBITDEPTH
#define DCT_MAX_VALUE 16384
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_MAX_VALUE_HIGH10 65536
#define DCT_MAX_VALUE_HIGH12 262144
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* Coefficients are predicted via a 3-dimensional probability table. */
@@ -135,18 +135,18 @@
// #define ENTROPY_STATS
typedef unsigned int
- vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
+ av1_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
typedef unsigned int
- vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
+ av1_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
#define SUBEXP_PARAM 4 /* Subexponential code parameter */
#define MODULUS_PARAM 13 /* Modulus parameter */
-struct VP10Common;
-void vp10_default_coef_probs(struct VP10Common *cm);
-void vp10_adapt_coef_probs(struct VP10Common *cm);
+struct AV1Common;
+void av1_default_coef_probs(struct AV1Common *cm);
+void av1_adapt_coef_probs(struct AV1Common *cm);
#if CONFIG_ENTROPY
-void vp10_partial_adapt_probs(struct VP10Common *cm, int mi_row, int mi_col);
+void av1_partial_adapt_probs(struct AV1Common *cm, int mi_row, int mi_col);
#endif // CONFIG_ENTROPY
// This is the index in the scan order beyond which all coefficients for
@@ -154,11 +154,11 @@
// This macro is currently unused but may be used by certain implementations
#define MAXBAND_INDEX 21
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_8x8plus[1024]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_8x8plus[1024]);
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x8_8x4[32]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x8_8x4[32]);
#endif // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x4[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x4[16]);
DECLARE_ALIGNED(16, extern const uint16_t, band_count_table[TX_SIZES_ALL][8]);
DECLARE_ALIGNED(16, extern const uint16_t,
@@ -166,11 +166,11 @@
static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
switch (tx_size) {
- case TX_4X4: return vp10_coefband_trans_4x4;
+ case TX_4X4: return av1_coefband_trans_4x4;
#if CONFIG_EXT_TX
- case TX_4X8: return vp10_coefband_trans_4x8_8x4;
+ case TX_4X8: return av1_coefband_trans_4x8_8x4;
#endif // CONFIG_EXT_TX
- default: return vp10_coefband_trans_8x8plus;
+ default: return av1_coefband_trans_8x8plus;
}
}
@@ -185,22 +185,22 @@
#define PIVOT_NODE 2 // which node is pivot
#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
-extern const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
-extern const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
+extern const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
+extern const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
#if CONFIG_ANS
extern const AnsP10
- vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
+ av1_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
typedef rans_dec_lut coeff_cdf_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
#endif // CONFIG_ANS
-typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
- [UNCONSTRAINED_NODES];
+typedef aom_prob av1_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
+ [UNCONSTRAINED_NODES];
-typedef unsigned int vp10_coeff_count_model
+typedef unsigned int av1_coeff_count_model
[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
-void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full);
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full);
typedef char ENTROPY_CONTEXT;
@@ -263,7 +263,7 @@
#if CONFIG_ANS
struct frame_contexts;
-void vp10_coef_pareto_cdfs(struct frame_contexts *fc);
+void av1_coef_pareto_cdfs(struct frame_contexts *fc);
#endif // CONFIG_ANS
#if CONFIG_ENTROPY
@@ -283,14 +283,14 @@
#endif // CONFIG_ENTROPY
-static INLINE vpx_prob vp10_merge_probs(vpx_prob pre_prob,
- const unsigned int ct[2],
- unsigned int count_sat,
- unsigned int max_update_factor) {
+static INLINE aom_prob av1_merge_probs(aom_prob pre_prob,
+ const unsigned int ct[2],
+ unsigned int count_sat,
+ unsigned int max_update_factor) {
#if CONFIG_ENTROPY
- const vpx_prob prob = get_binary_prob(ct[0], ct[1]);
+ const aom_prob prob = get_binary_prob(ct[0], ct[1]);
const unsigned int count =
- VPXMIN(ct[0] + ct[1], (unsigned int)(1 << count_sat));
+ AOMMIN(ct[0] + ct[1], (unsigned int)(1 << count_sat));
const unsigned int factor = count << (max_update_factor - count_sat);
return weighted_prob(pre_prob, prob, factor);
#else
@@ -298,11 +298,11 @@
#endif // CONFIG_ENTROPY
}
-static INLINE vpx_prob vp10_mode_mv_merge_probs(vpx_prob pre_prob,
- const unsigned int ct[2]) {
+static INLINE aom_prob av1_mode_mv_merge_probs(aom_prob pre_prob,
+ const unsigned int ct[2]) {
#if CONFIG_ENTROPY
- return vp10_merge_probs(pre_prob, ct, MODE_MV_COUNT_SAT_BITS,
- MODE_MV_MAX_UPDATE_FACTOR_BITS);
+ return av1_merge_probs(pre_prob, ct, MODE_MV_COUNT_SAT_BITS,
+ MODE_MV_MAX_UPDATE_FACTOR_BITS);
#else
return mode_mv_merge_probs(pre_prob, ct);
#endif // CONFIG_ENTROPY
@@ -312,4 +312,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ENTROPY_H_
+#endif // AV1_COMMON_ENTROPY_H_
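Note: av1_merge_probs(), renamed in the entropy.h hunk above, is the
count-saturated probability update used by the frame-adaptation code later
in this change. The standalone sketch below re-derives the arithmetic for
reference only; get_binary_prob() and weighted_prob() are reconstructed
from their conventional 8-bit definitions (the real helpers live in
aom_dsp/prob.h) and may differ from the library in rounding detail.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t aom_prob; /* probability of the 0-branch, in [1, 255] */

/* Estimate P(branch 0) from a {zero, one} count pair, clamped to [1, 255]. */
static aom_prob get_binary_prob(unsigned int n0, unsigned int n1) {
  const unsigned int den = n0 + n1;
  unsigned int p;
  if (den == 0) return 128;
  p = (unsigned int)(((uint64_t)n0 * 256 + den / 2) / den);
  if (p < 1) p = 1;
  if (p > 255) p = 255;
  return (aom_prob)p;
}

/* Blend the previous probability toward the new estimate by factor/256. */
static aom_prob weighted_prob(aom_prob pre, aom_prob cur, unsigned int factor) {
  return (aom_prob)((pre * (256 - factor) + cur * factor + 128) >> 8);
}

/* Mirrors the CONFIG_ENTROPY branch of av1_merge_probs(): saturation and
 * the maximum update factor are both passed as bit counts, so the update
 * weight grows linearly with the number of observed symbols and tops out
 * at 1 << max_update_bits. */
static aom_prob merge_probs_sketch(aom_prob pre_prob, const unsigned int ct[2],
                                   unsigned int count_sat_bits,
                                   unsigned int max_update_bits) {
  const aom_prob prob = get_binary_prob(ct[0], ct[1]);
  const unsigned int count_sat = 1u << count_sat_bits;
  unsigned int count = ct[0] + ct[1];
  if (count > count_sat) count = count_sat; /* AOMMIN(count, 1 << sat) */
  return weighted_prob(pre_prob, prob,
                       count << (max_update_bits - count_sat_bits));
}

int main(void) {
  const unsigned int ct[2] = { 20, 4 }; /* 20 zeros, 4 ones observed */
  /* With MODE_MV-style parameters (e.g. 5 saturation bits, 7 factor bits)
   * a prior of 128 moves partway toward the new estimate of ~213. */
  printf("merged prob = %u\n", merge_probs_sketch(128, ct, 5, 7));
  return 0;
}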
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 98e26e7..80ed00f 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -8,152 +8,153 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/reconinter.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/seg_common.h"
-const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
- { {
- // above = dc
- { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc
- { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, // left = v
- { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, // left = h
- { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, // left = d45
- { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, // left = d135
- { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, // left = d117
- { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, // left = d153
- { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, // left = d207
- { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, // left = d63
- { 59, 67, 44, 140, 161, 202, 78, 67, 119 } // left = tm
- },
- {
- // above = v
- { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, // left = dc
- { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, // left = v
- { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, // left = h
- { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, // left = d45
- { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, // left = d135
- { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, // left = d117
- { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, // left = d153
- { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, // left = d207
- { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, // left = d63
- { 36, 61, 116, 114, 128, 162, 80, 125, 82 } // left = tm
- },
- {
- // above = h
- { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, // left = dc
- { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, // left = v
- { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, // left = h
- { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, // left = d45
- { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, // left = d135
- { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, // left = d117
- { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, // left = d153
- { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, // left = d207
- { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, // left = d63
- { 38, 72, 19, 168, 203, 212, 50, 50, 107 } // left = tm
- },
- {
- // above = d45
- { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, // left = dc
- { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, // left = v
- { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, // left = h
- { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, // left = d45
- { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, // left = d135
- { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, // left = d117
- { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, // left = d153
- { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, // left = d207
- { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, // left = d63
- { 47, 47, 43, 114, 137, 181, 100, 99, 95 } // left = tm
- },
- {
- // above = d135
- { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, // left = dc
- { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, // left = v
- { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, // left = h
- { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, // left = d45
- { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, // left = d135
- { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, // left = d117
- { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, // left = d153
- { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, // left = d207
- { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, // left = d63
- { 47, 54, 34, 146, 108, 203, 72, 103, 151 } // left = tm
- },
- {
- // above = d117
- { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, // left = dc
- { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, // left = v
- { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, // left = h
- { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, // left = d45
- { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, // left = d135
- { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, // left = d117
- { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, // left = d153
- { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, // left = d207
- { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, // left = d63
- { 38, 44, 51, 136, 74, 162, 57, 97, 121 } // left = tm
- },
- {
- // above = d153
- { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, // left = dc
- { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, // left = v
- { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, // left = h
- { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, // left = d45
- { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, // left = d135
- { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, // left = d117
- { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, // left = d153
- { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, // left = d207
- { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, // left = d63
- { 37, 49, 25, 129, 168, 164, 41, 54, 148 } // left = tm
- },
- {
- // above = d207
- { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, // left = dc
- { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, // left = v
- { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, // left = h
- { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, // left = d45
- { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, // left = d135
- { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, // left = d117
- { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, // left = d153
- { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, // left = d207
- { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, // left = d63
- { 40, 61, 26, 126, 152, 206, 61, 59, 93 } // left = tm
- },
- {
- // above = d63
- { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, // left = dc
- { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, // left = v
- { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, // left = h
- { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, // left = d45
- { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, // left = d135
- { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, // left = d117
- { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, // left = d153
- { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, // left = d207
- { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, // left = d63
- { 36, 38, 48, 92, 122, 165, 88, 137, 91 } // left = tm
- },
- {
- // above = tm
- { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, // left = dc
- { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, // left = v
- { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, // left = h
- { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, // left = d45
- { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, // left = d135
- { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, // left = d117
- { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, // left = d153
- { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, // left = d207
- { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, // left = d63
- { 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm
- } };
+const aom_prob av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = {
+ {
+ // above = dc
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, // left = v
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, // left = h
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, // left = d45
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, // left = d135
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, // left = d117
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, // left = d153
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, // left = d207
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, // left = d63
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 } // left = tm
+ },
+ {
+ // above = v
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, // left = dc
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, // left = v
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, // left = h
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, // left = d45
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, // left = d135
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, // left = d117
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, // left = d153
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, // left = d207
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, // left = d63
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 } // left = tm
+ },
+ {
+ // above = h
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, // left = dc
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, // left = v
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, // left = h
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, // left = d45
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, // left = d135
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, // left = d117
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, // left = d153
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, // left = d207
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, // left = d63
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 } // left = tm
+ },
+ {
+ // above = d45
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, // left = dc
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, // left = v
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, // left = h
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, // left = d45
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, // left = d135
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, // left = d117
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, // left = d153
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, // left = d207
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, // left = d63
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 } // left = tm
+ },
+ {
+ // above = d135
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, // left = dc
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, // left = v
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, // left = h
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, // left = d45
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, // left = d135
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, // left = d117
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, // left = d153
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, // left = d207
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, // left = d63
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 } // left = tm
+ },
+ {
+ // above = d117
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, // left = dc
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, // left = v
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, // left = h
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, // left = d45
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, // left = d135
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, // left = d117
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, // left = d153
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, // left = d207
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, // left = d63
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 } // left = tm
+ },
+ {
+ // above = d153
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, // left = dc
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, // left = v
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, // left = h
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, // left = d45
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, // left = d135
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, // left = d117
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, // left = d153
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, // left = d207
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, // left = d63
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 } // left = tm
+ },
+ {
+ // above = d207
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, // left = dc
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, // left = v
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, // left = h
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, // left = d45
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, // left = d135
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, // left = d117
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, // left = d153
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, // left = d207
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, // left = d63
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 } // left = tm
+ },
+ {
+ // above = d63
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, // left = dc
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, // left = v
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, // left = h
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, // left = d45
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, // left = d135
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, // left = d117
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, // left = d153
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, // left = d207
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, // left = d63
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 } // left = tm
+ },
+ {
+ // above = tm
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, // left = dc
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, // left = v
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, // left = h
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, // left = d45
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, // left = d135
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, // left = d117
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, // left = d153
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, // left = d207
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, // left = d63
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm
+ }
+};
-static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
+static const aom_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
{ 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
{ 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16
{ 173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32
{ 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32
};
-static const vpx_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
+static const aom_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
{ 120, 7, 76, 176, 208, 126, 28, 54, 103 }, // y = dc
{ 48, 12, 154, 155, 139, 90, 34, 117, 119 }, // y = v
{ 67, 6, 25, 204, 243, 158, 13, 21, 96 }, // y = h
@@ -167,7 +168,7 @@
};
#if CONFIG_EXT_PARTITION_TYPES
-static const vpx_prob
+static const aom_prob
default_partition_probs[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1] = {
// 8x8 -> 4x4
{ 199, 122, 141, 128, 128, 128, 128 }, // a/l both not split
@@ -198,7 +199,7 @@
#endif // CONFIG_EXT_PARTITION
};
#else
-static const vpx_prob
+static const aom_prob
default_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
// 8x8 -> 4x4
{ 199, 122, 141 }, // a/l both not split
@@ -231,27 +232,27 @@
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_REF_MV
-static const vpx_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
+static const aom_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
200, 180, 150, 150, 110, 70, 60,
};
-static const vpx_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
+static const aom_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
192, 64,
};
-static const vpx_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
+static const aom_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
220, 220, 200, 200, 180, 128, 30, 220, 30,
};
-static const vpx_prob default_drl_prob[DRL_MODE_CONTEXTS] = { 128, 160, 180,
+static const aom_prob default_drl_prob[DRL_MODE_CONTEXTS] = { 128, 160, 180,
128, 160 };
#if CONFIG_EXT_INTER
-static const vpx_prob default_new2mv_prob = 180;
+static const aom_prob default_new2mv_prob = 180;
#endif // CONFIG_EXT_INTER
#endif // CONFIG_REF_MV
-static const vpx_prob
+static const aom_prob
default_inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] = {
#if CONFIG_EXT_INTER
// TODO(zoeliu): To adjust the initial default probs
@@ -274,7 +275,7 @@
};
#if CONFIG_EXT_INTER
-static const vpx_prob default_inter_compound_mode_probs
+static const aom_prob default_inter_compound_mode_probs
[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1] = {
{ 2, 173, 68, 192, 64, 192, 128, 180, 180 }, // 0 = both zero mv
{ 7, 145, 160, 192, 64, 192, 128, 180, 180 }, // 1 = 1 zero + 1 predicted
@@ -285,11 +286,11 @@
{ 25, 29, 50, 192, 64, 192, 128, 180, 180 }, // 6 = two intra neighbours
};
-static const vpx_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
+static const aom_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
208, 208, 208, 208,
};
-static const vpx_prob
+static const aom_prob
default_interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1] = {
{ 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
{ 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16
@@ -297,14 +298,14 @@
{ 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32
};
-static const vpx_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
+static const aom_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
208, 208, 208, 208, 208, 208, 216, 216, 216, 224, 224, 224, 240,
#if CONFIG_EXT_PARTITION
208, 208, 208
#endif // CONFIG_EXT_PARTITION
};
-static const vpx_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
+static const aom_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
208, 208, 208, 208, 208, 208, 216, 216, 216, 224, 224, 224, 240,
#if CONFIG_EXT_PARTITION
255, 255, 255
@@ -314,10 +315,10 @@
// Change this section appropriately once warped motion is supported
#if CONFIG_OBMC && !CONFIG_WARPED_MOTION
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
-SIMPLE_TRANSLATION, -OBMC_CAUSAL
};
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
{
{ 255 }, { 255 }, { 255 }, { 151 }, { 153 }, { 144 }, { 178 },
{ 165 }, { 160 }, { 207 }, { 195 }, { 168 }, { 244 },
@@ -328,10 +329,10 @@
#elif !CONFIG_OBMC && CONFIG_WARPED_MOTION
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
-SIMPLE_TRANSLATION, -WARPED_CAUSAL
};
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
{
{ 255 }, { 255 }, { 255 }, { 151 }, { 153 }, { 144 }, { 178 },
{ 165 }, { 160 }, { 207 }, { 195 }, { 168 }, { 244 },
@@ -342,10 +343,10 @@
#elif CONFIG_OBMC && CONFIG_WARPED_MOTION
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
-SIMPLE_TRANSLATION, 2, -OBMC_CAUSAL, -WARPED_CAUSAL,
};
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
{
{ 255, 200 }, { 255, 200 }, { 255, 200 }, { 151, 200 }, { 153, 200 },
{ 144, 200 }, { 178, 200 }, { 165, 200 }, { 160, 200 }, { 207, 200 },
@@ -357,7 +358,7 @@
#endif // CONFIG_OBMC || !CONFIG_WARPED_MOTION
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
+const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
-DC_PRED, 2, /* 0 = DC_NODE */
-TM_PRED, 4, /* 1 = TM_NODE */
-V_PRED, 6, /* 2 = V_NODE */
@@ -369,7 +370,7 @@
-D153_PRED, -D207_PRED /* 8 = D153_NODE */
};
-const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
+const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
-INTER_OFFSET(ZEROMV), 2,
-INTER_OFFSET(NEARESTMV), 4,
#if CONFIG_EXT_INTER
@@ -382,7 +383,7 @@
#if CONFIG_EXT_INTER
/* clang-format off */
-const vpx_tree_index vp10_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)] = {
+const aom_tree_index av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)] = {
-II_DC_PRED, 2, /* 0 = II_DC_NODE */
-II_TM_PRED, 4, /* 1 = II_TM_NODE */
-II_V_PRED, 6, /* 2 = II_V_NODE */
@@ -394,7 +395,7 @@
-II_D153_PRED, -II_D207_PRED /* 8 = II_D153_NODE */
};
-const vpx_tree_index vp10_inter_compound_mode_tree
+const aom_tree_index av1_inter_compound_mode_tree
[TREE_SIZE(INTER_COMPOUND_MODES)] = {
-INTER_COMPOUND_OFFSET(ZERO_ZEROMV), 2,
-INTER_COMPOUND_OFFSET(NEAREST_NEARESTMV), 4,
@@ -410,13 +411,13 @@
/* clang-format on */
#endif // CONFIG_EXT_INTER
-const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
+const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
-PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
};
#if CONFIG_EXT_PARTITION_TYPES
/* clang-format off */
-const vpx_tree_index vp10_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)] = {
+const aom_tree_index av1_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)] = {
-PARTITION_NONE, 2,
6, 4,
8, -PARTITION_SPLIT,
@@ -428,16 +429,16 @@
/* clang-format on */
#endif // CONFIG_EXT_PARTITION_TYPES
-static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
+static const aom_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
9, 102, 187, 225
};
-static const vpx_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
+static const aom_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
239, 183, 119, 96, 41
};
#if CONFIG_EXT_REFS
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS][FWD_REFS - 1] = {
+static const aom_prob default_comp_ref_p[REF_CONTEXTS][FWD_REFS - 1] = {
// TODO(zoeliu): To adjust the initial prob values.
{ 33, 16, 16 },
{ 77, 74, 74 },
@@ -445,16 +446,16 @@
{ 172, 170, 170 },
{ 238, 247, 247 }
};
-static const vpx_prob default_comp_bwdref_p[REF_CONTEXTS][BWD_REFS - 1] = {
+static const aom_prob default_comp_bwdref_p[REF_CONTEXTS][BWD_REFS - 1] = {
{ 16 }, { 74 }, { 142 }, { 170 }, { 247 }
};
#else
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS][COMP_REFS - 1] = {
+static const aom_prob default_comp_ref_p[REF_CONTEXTS][COMP_REFS - 1] = {
{ 50 }, { 126 }, { 123 }, { 221 }, { 226 }
};
#endif // CONFIG_EXT_REFS
-static const vpx_prob default_single_ref_p[REF_CONTEXTS][SINGLE_REFS - 1] = {
+static const aom_prob default_single_ref_p[REF_CONTEXTS][SINGLE_REFS - 1] = {
#if CONFIG_EXT_REFS
{ 33, 16, 16, 16, 16 },
{ 77, 74, 74, 74, 74 },
@@ -466,14 +467,14 @@
#endif // CONFIG_EXT_REFS
};
-const vpx_tree_index vp10_palette_size_tree[TREE_SIZE(PALETTE_SIZES)] = {
+const aom_tree_index av1_palette_size_tree[TREE_SIZE(PALETTE_SIZES)] = {
-TWO_COLORS, 2, -THREE_COLORS, 4, -FOUR_COLORS, 6,
-FIVE_COLORS, 8, -SIX_COLORS, 10, -SEVEN_COLORS, -EIGHT_COLORS,
};
// TODO(huisu): tune these probs
-const vpx_prob
- vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
+const aom_prob
+ av1_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
{ 96, 89, 100, 64, 77, 130 }, { 22, 15, 44, 16, 34, 82 },
{ 30, 19, 57, 18, 38, 86 }, { 94, 36, 104, 23, 43, 92 },
{ 116, 76, 107, 46, 65, 105 }, { 112, 82, 94, 40, 70, 112 },
@@ -485,21 +486,20 @@
#endif // CONFIG_EXT_PARTITION
};
-const vpx_prob
- vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] =
- {
- { 160, 196, 228, 213, 175, 230 }, { 87, 148, 208, 141, 166, 163 },
- { 72, 151, 204, 139, 155, 161 }, { 78, 135, 171, 104, 120, 173 },
- { 59, 92, 131, 78, 92, 142 }, { 75, 118, 149, 84, 90, 128 },
- { 89, 87, 92, 66, 66, 128 }, { 67, 53, 54, 55, 66, 93 },
- { 120, 130, 83, 171, 75, 214 }, { 72, 55, 66, 68, 79, 107 },
+const aom_prob
+ av1_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
+ { 160, 196, 228, 213, 175, 230 }, { 87, 148, 208, 141, 166, 163 },
+ { 72, 151, 204, 139, 155, 161 }, { 78, 135, 171, 104, 120, 173 },
+ { 59, 92, 131, 78, 92, 142 }, { 75, 118, 149, 84, 90, 128 },
+ { 89, 87, 92, 66, 66, 128 }, { 67, 53, 54, 55, 66, 93 },
+ { 120, 130, 83, 171, 75, 214 }, { 72, 55, 66, 68, 79, 107 },
#if CONFIG_EXT_PARTITION
- { 72, 55, 66, 68, 79, 107 }, { 72, 55, 66, 68, 79, 107 },
- { 72, 55, 66, 68, 79, 107 },
+ { 72, 55, 66, 68, 79, 107 }, { 72, 55, 66, 68, 79, 107 },
+ { 72, 55, 66, 68, 79, 107 },
#endif // CONFIG_EXT_PARTITION
- };
+ };
-const vpx_prob vp10_default_palette_y_mode_prob
+const aom_prob av1_default_palette_y_mode_prob
[PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS] = {
{ 240, 180, 100 }, { 240, 180, 100 }, { 240, 180, 100 },
{ 240, 180, 100 }, { 240, 180, 100 }, { 240, 180, 100 },
@@ -510,10 +510,10 @@
#endif // CONFIG_EXT_PARTITION
};
-const vpx_prob vp10_default_palette_uv_mode_prob[2] = { 253, 229 };
+const aom_prob av1_default_palette_uv_mode_prob[2] = { 253, 229 };
-const vpx_tree_index
- vp10_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)] = {
+const aom_tree_index
+ av1_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)] = {
{ // 2 colors
-PALETTE_COLOR_ONE, -PALETTE_COLOR_TWO },
{ // 3 colors
@@ -537,7 +537,7 @@
-PALETTE_COLOR_SEVEN, -PALETTE_COLOR_EIGHT },
};
-const vpx_prob vp10_default_palette_y_color_prob
+const aom_prob av1_default_palette_y_color_prob
[PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] = {
{
// 2 colors
@@ -674,7 +674,7 @@
}
};
-const vpx_prob vp10_default_palette_uv_color_prob
+const aom_prob av1_default_palette_uv_color_prob
[PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] = {
{
// 2 colors
@@ -822,7 +822,7 @@
9680, 10648, 10890, 13310
};
-const vpx_tree_index vp10_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)] = {
+const aom_tree_index av1_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)] = {
{
// Max tx_size is 8X8
-TX_4X4, -TX_8X8,
@@ -837,7 +837,7 @@
},
};
-static const vpx_prob
+static const aom_prob
default_tx_size_prob[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1] = {
{
// Max tx_size is 8X8
@@ -856,8 +856,8 @@
},
};
-int vp10_get_palette_color_context(const uint8_t *color_map, int cols, int r,
- int c, int n, int *color_order) {
+int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
+ int c, int n, int *color_order) {
int i, j, max, max_idx, temp;
int scores[PALETTE_MAX_SIZE + 10];
int weights[4] = { 3, 2, 3, 2 };
@@ -926,15 +926,15 @@
}
#if CONFIG_VAR_TX
-static const vpx_prob default_txfm_partition_probs[TXFM_PARTITION_CONTEXTS] = {
+static const aom_prob default_txfm_partition_probs[TXFM_PARTITION_CONTEXTS] = {
192, 128, 64, 192, 128, 64, 192, 128, 64,
};
#endif
-static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
+static const aom_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
#if CONFIG_EXT_INTERP
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
#if CONFIG_DUAL_FILTER
{ 235, 192, 128, 128 }, { 36, 243, 208, 128 }, { 34, 16, 128, 128 },
@@ -955,7 +955,7 @@
};
#else // CONFIG_EXT_INTERP
#if CONFIG_DUAL_FILTER
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
{ 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
@@ -966,7 +966,7 @@
{ 235, 162 }, { 36, 255 }, { 34, 3 }, { 10, 3 },
};
#else
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
{ 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
};
@@ -975,7 +975,7 @@
#if CONFIG_EXT_TX
/* clang-format off */
-const vpx_tree_index vp10_ext_tx_inter_tree[EXT_TX_SETS_INTER]
+const aom_tree_index av1_ext_tx_inter_tree[EXT_TX_SETS_INTER]
[TREE_SIZE(TX_TYPES)] = {
  { // ToDo(yaowu): remove unused entry 0.
0
@@ -1012,7 +1012,7 @@
}
};
-const vpx_tree_index vp10_ext_tx_intra_tree[EXT_TX_SETS_INTRA]
+const aom_tree_index av1_ext_tx_intra_tree[EXT_TX_SETS_INTRA]
[TREE_SIZE(TX_TYPES)] = {
{ // ToDo(yaowu): remove unused entry 0.
0
@@ -1032,7 +1032,7 @@
};
/* clang-format on */
-static const vpx_prob
+static const aom_prob
default_inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1] = {
{
// ToDo(yaowu): remove unused entry 0.
@@ -1073,7 +1073,7 @@
}
};
-static const vpx_prob default_intra_ext_tx_prob
+static const aom_prob default_intra_ext_tx_prob
[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES][TX_TYPES - 1] = {
{
// ToDo(yaowu): remove unused entry 0.
@@ -1237,41 +1237,41 @@
#else
/* clang-format off */
-const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
+const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
-DCT_DCT, 2,
-ADST_ADST, 4,
-ADST_DCT, -DCT_ADST
};
/* clang-format on */
-static const vpx_prob
+static const aom_prob
default_intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1] = {
{ { 240, 85, 128 }, { 4, 1, 248 }, { 4, 1, 8 }, { 4, 248, 128 } },
{ { 244, 85, 128 }, { 8, 2, 248 }, { 8, 2, 8 }, { 8, 248, 128 } },
{ { 248, 85, 128 }, { 16, 4, 248 }, { 16, 4, 8 }, { 16, 248, 128 } },
};
-static const vpx_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
+static const aom_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
{ 160, 85, 128 }, { 176, 85, 128 }, { 192, 85, 128 },
};
#endif // CONFIG_EXT_TX
#if CONFIG_EXT_INTRA
-static const vpx_prob
+static const aom_prob
default_intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1] = {
{ 98, 63, 60 }, { 98, 82, 80 }, { 94, 65, 103 },
{ 49, 25, 24 }, { 72, 38, 50 },
};
-static const vpx_prob default_ext_intra_probs[2] = { 230, 230 };
+static const aom_prob default_ext_intra_probs[2] = { 230, 230 };
-const vpx_tree_index vp10_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)] = {
+const aom_tree_index av1_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)] = {
-INTRA_FILTER_LINEAR, 2, -INTRA_FILTER_8TAP, 4, -INTRA_FILTER_8TAP_SHARP,
-INTRA_FILTER_8TAP_SMOOTH,
};
#endif // CONFIG_EXT_INTRA
#if CONFIG_SUPERTX
-static const vpx_prob
+static const aom_prob
default_supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES] = {
{ 1, 160, 160, 170 }, { 1, 200, 200, 210 },
};
@@ -1283,58 +1283,58 @@
};
static void init_mode_probs(FRAME_CONTEXT *fc) {
- vp10_copy(fc->uv_mode_prob, default_uv_probs);
- vp10_copy(fc->y_mode_prob, default_if_y_probs);
- vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
- vp10_copy(fc->partition_prob, default_partition_probs);
- vp10_copy(fc->intra_inter_prob, default_intra_inter_p);
- vp10_copy(fc->comp_inter_prob, default_comp_inter_p);
- vp10_copy(fc->comp_ref_prob, default_comp_ref_p);
+ av1_copy(fc->uv_mode_prob, default_uv_probs);
+ av1_copy(fc->y_mode_prob, default_if_y_probs);
+ av1_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
+ av1_copy(fc->partition_prob, default_partition_probs);
+ av1_copy(fc->intra_inter_prob, default_intra_inter_p);
+ av1_copy(fc->comp_inter_prob, default_comp_inter_p);
+ av1_copy(fc->comp_ref_prob, default_comp_ref_p);
#if CONFIG_EXT_REFS
- vp10_copy(fc->comp_bwdref_prob, default_comp_bwdref_p);
+ av1_copy(fc->comp_bwdref_prob, default_comp_bwdref_p);
#endif // CONFIG_EXT_REFS
- vp10_copy(fc->single_ref_prob, default_single_ref_p);
- vp10_copy(fc->tx_size_probs, default_tx_size_prob);
+ av1_copy(fc->single_ref_prob, default_single_ref_p);
+ av1_copy(fc->tx_size_probs, default_tx_size_prob);
#if CONFIG_VAR_TX
- vp10_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
+ av1_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
#endif
- vp10_copy(fc->skip_probs, default_skip_probs);
+ av1_copy(fc->skip_probs, default_skip_probs);
#if CONFIG_REF_MV
- vp10_copy(fc->newmv_prob, default_newmv_prob);
- vp10_copy(fc->zeromv_prob, default_zeromv_prob);
- vp10_copy(fc->refmv_prob, default_refmv_prob);
- vp10_copy(fc->drl_prob, default_drl_prob);
+ av1_copy(fc->newmv_prob, default_newmv_prob);
+ av1_copy(fc->zeromv_prob, default_zeromv_prob);
+ av1_copy(fc->refmv_prob, default_refmv_prob);
+ av1_copy(fc->drl_prob, default_drl_prob);
#if CONFIG_EXT_INTER
fc->new2mv_prob = default_new2mv_prob;
#endif // CONFIG_EXT_INTER
#endif // CONFIG_REF_MV
- vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
+ av1_copy(fc->inter_mode_probs, default_inter_mode_probs);
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
- vp10_copy(fc->motvar_prob, default_motvar_prob);
+ av1_copy(fc->motvar_prob, default_motvar_prob);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
#if CONFIG_EXT_INTER
- vp10_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
- vp10_copy(fc->interintra_prob, default_interintra_prob);
- vp10_copy(fc->interintra_mode_prob, default_interintra_mode_prob);
- vp10_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
- vp10_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
+ av1_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
+ av1_copy(fc->interintra_prob, default_interintra_prob);
+ av1_copy(fc->interintra_mode_prob, default_interintra_mode_prob);
+ av1_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
+ av1_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
#endif // CONFIG_EXT_INTER
#if CONFIG_SUPERTX
- vp10_copy(fc->supertx_prob, default_supertx_prob);
+ av1_copy(fc->supertx_prob, default_supertx_prob);
#endif // CONFIG_SUPERTX
- vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
- vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
+ av1_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
+ av1_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
#if CONFIG_EXT_INTRA
- vp10_copy(fc->ext_intra_probs, default_ext_intra_probs);
- vp10_copy(fc->intra_filter_probs, default_intra_filter_probs);
+ av1_copy(fc->ext_intra_probs, default_ext_intra_probs);
+ av1_copy(fc->intra_filter_probs, default_intra_filter_probs);
#endif // CONFIG_EXT_INTRA
- vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
- vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
+ av1_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
+ av1_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
}
#if CONFIG_EXT_INTERP
-const vpx_tree_index
- vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] = {
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] =
+ {
-EIGHTTAP_REGULAR,
2,
4,
@@ -1345,23 +1345,22 @@
-MULTITAP_SHARP2,
};
#else
-const vpx_tree_index vp10_switchable_interp_tree[TREE_SIZE(
- SWITCHABLE_FILTERS)] = { -EIGHTTAP_REGULAR, 2, -EIGHTTAP_SMOOTH,
- -MULTITAP_SHARP };
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] =
+ { -EIGHTTAP_REGULAR, 2, -EIGHTTAP_SMOOTH, -MULTITAP_SHARP };
#endif // CONFIG_EXT_INTERP
-void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
int i, j;
FRAME_CONTEXT *fc = cm->fc;
const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
const FRAME_COUNTS *counts = &cm->counts;
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- fc->intra_inter_prob[i] = vp10_mode_mv_merge_probs(
+ fc->intra_inter_prob[i] = av1_mode_mv_merge_probs(
pre_fc->intra_inter_prob[i], counts->intra_inter[i]);
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- fc->comp_inter_prob[i] = vp10_mode_mv_merge_probs(
- pre_fc->comp_inter_prob[i], counts->comp_inter[i]);
+ fc->comp_inter_prob[i] = av1_mode_mv_merge_probs(pre_fc->comp_inter_prob[i],
+ counts->comp_inter[i]);
#if CONFIG_EXT_REFS
for (i = 0; i < REF_CONTEXTS; i++)
@@ -1381,36 +1380,36 @@
for (i = 0; i < REF_CONTEXTS; i++)
for (j = 0; j < (SINGLE_REFS - 1); j++)
- fc->single_ref_prob[i][j] = vp10_mode_mv_merge_probs(
+ fc->single_ref_prob[i][j] = av1_mode_mv_merge_probs(
pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
#if CONFIG_REF_MV
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
fc->newmv_prob[i] =
- vp10_mode_mv_merge_probs(pre_fc->newmv_prob[i], counts->newmv_mode[i]);
+ av1_mode_mv_merge_probs(pre_fc->newmv_prob[i], counts->newmv_mode[i]);
for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
- fc->zeromv_prob[i] = vp10_mode_mv_merge_probs(pre_fc->zeromv_prob[i],
- counts->zeromv_mode[i]);
+ fc->zeromv_prob[i] =
+ av1_mode_mv_merge_probs(pre_fc->zeromv_prob[i], counts->zeromv_mode[i]);
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
fc->refmv_prob[i] =
- vp10_mode_mv_merge_probs(pre_fc->refmv_prob[i], counts->refmv_mode[i]);
+ av1_mode_mv_merge_probs(pre_fc->refmv_prob[i], counts->refmv_mode[i]);
for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
fc->drl_prob[i] =
- vp10_mode_mv_merge_probs(pre_fc->drl_prob[i], counts->drl_mode[i]);
+ av1_mode_mv_merge_probs(pre_fc->drl_prob[i], counts->drl_mode[i]);
#if CONFIG_EXT_INTER
fc->new2mv_prob =
- vp10_mode_mv_merge_probs(pre_fc->new2mv_prob, counts->new2mv_mode);
+ av1_mode_mv_merge_probs(pre_fc->new2mv_prob, counts->new2mv_mode);
#endif // CONFIG_EXT_INTER
#else
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
- vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
+ aom_tree_merge_probs(av1_inter_mode_tree, pre_fc->inter_mode_probs[i],
counts->inter_mode[i], fc->inter_mode_probs[i]);
#endif
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
- vpx_tree_merge_probs(vp10_motvar_tree, pre_fc->motvar_prob[i],
+ aom_tree_merge_probs(av1_motvar_tree, pre_fc->motvar_prob[i],
counts->motvar[i], fc->motvar_prob[i]);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -1418,7 +1417,7 @@
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
int j;
for (j = 1; j < TX_SIZES; ++j) {
- fc->supertx_prob[i][j] = vp10_mode_mv_merge_probs(
+ fc->supertx_prob[i][j] = av1_mode_mv_merge_probs(
pre_fc->supertx_prob[i][j], counts->supertx[i][j]);
}
}
@@ -1426,44 +1425,44 @@
#if CONFIG_EXT_INTER
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
- vpx_tree_merge_probs(
- vp10_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i],
+ aom_tree_merge_probs(
+ av1_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i],
counts->inter_compound_mode[i], fc->inter_compound_mode_probs[i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
if (is_interintra_allowed_bsize_group(i))
- fc->interintra_prob[i] = vp10_mode_mv_merge_probs(
+ fc->interintra_prob[i] = av1_mode_mv_merge_probs(
pre_fc->interintra_prob[i], counts->interintra[i]);
}
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
- vpx_tree_merge_probs(
- vp10_interintra_mode_tree, pre_fc->interintra_mode_prob[i],
+ aom_tree_merge_probs(
+ av1_interintra_mode_tree, pre_fc->interintra_mode_prob[i],
counts->interintra_mode[i], fc->interintra_mode_prob[i]);
}
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
- fc->wedge_interintra_prob[i] = vp10_mode_mv_merge_probs(
+ fc->wedge_interintra_prob[i] = av1_mode_mv_merge_probs(
pre_fc->wedge_interintra_prob[i], counts->wedge_interintra[i]);
}
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interinter_wedge_used(i))
- fc->wedge_interinter_prob[i] = vp10_mode_mv_merge_probs(
+ fc->wedge_interinter_prob[i] = av1_mode_mv_merge_probs(
pre_fc->wedge_interinter_prob[i], counts->wedge_interinter[i]);
}
#endif // CONFIG_EXT_INTER
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
- vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
+ aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->y_mode_prob[i],
counts->y_mode[i], fc->y_mode_prob[i]);
if (cm->interp_filter == SWITCHABLE) {
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- vpx_tree_merge_probs(
- vp10_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
+ aom_tree_merge_probs(
+ av1_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
counts->switchable_interp[i], fc->switchable_interp_prob[i]);
}
}
-void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
int i, j;
FRAME_CONTEXT *fc = cm->fc;
const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -1472,7 +1471,7 @@
if (cm->tx_mode == TX_MODE_SELECT) {
for (i = 0; i < TX_SIZES - 1; ++i) {
for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
- vpx_tree_merge_probs(vp10_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
+ aom_tree_merge_probs(av1_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
counts->tx_size[i][j], fc->tx_size_probs[i][j]);
}
}
@@ -1480,21 +1479,21 @@
#if CONFIG_VAR_TX
if (cm->tx_mode == TX_MODE_SELECT)
for (i = 0; i < TXFM_PARTITION_CONTEXTS; ++i)
- fc->txfm_partition_prob[i] = vp10_mode_mv_merge_probs(
+ fc->txfm_partition_prob[i] = av1_mode_mv_merge_probs(
pre_fc->txfm_partition_prob[i], counts->txfm_partition[i]);
#endif
for (i = 0; i < SKIP_CONTEXTS; ++i)
fc->skip_probs[i] =
- vp10_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
+ av1_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
#if CONFIG_EXT_TX
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
if (use_inter_ext_tx_for_txsize[s][i]) {
- vpx_tree_merge_probs(
- vp10_ext_tx_inter_tree[s], pre_fc->inter_ext_tx_prob[s][i],
+ aom_tree_merge_probs(
+ av1_ext_tx_inter_tree[s], pre_fc->inter_ext_tx_prob[s][i],
counts->inter_ext_tx[s][i], fc->inter_ext_tx_prob[s][i]);
}
}
@@ -1502,8 +1501,8 @@
if (use_intra_ext_tx_for_txsize[s][i]) {
int j;
for (j = 0; j < INTRA_MODES; ++j)
- vpx_tree_merge_probs(
- vp10_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
+ aom_tree_merge_probs(
+ av1_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
counts->intra_ext_tx[s][i][j], fc->intra_ext_tx_prob[s][i][j]);
}
}
@@ -1511,52 +1510,52 @@
#else
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
+ aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
counts->intra_ext_tx[i][j],
fc->intra_ext_tx_prob[i][j]);
}
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
+ aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
counts->inter_ext_tx[i], fc->inter_ext_tx_prob[i]);
}
#endif // CONFIG_EXT_TX
if (cm->seg.temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++)
- fc->seg.pred_probs[i] = vp10_mode_mv_merge_probs(
- pre_fc->seg.pred_probs[i], counts->seg.pred[i]);
+ fc->seg.pred_probs[i] = av1_mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
+ counts->seg.pred[i]);
- vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+ aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
counts->seg.tree_mispred, fc->seg.tree_probs);
} else {
- vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+ aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
counts->seg.tree_total, fc->seg.tree_probs);
}
for (i = 0; i < INTRA_MODES; ++i)
- vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+ aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
counts->uv_mode[i], fc->uv_mode_prob[i]);
#if CONFIG_EXT_PARTITION_TYPES
- vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[0],
+ aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[0],
counts->partition[0], fc->partition_prob[0]);
for (i = 1; i < PARTITION_CONTEXTS; i++)
- vpx_tree_merge_probs(vp10_ext_partition_tree, pre_fc->partition_prob[i],
+ aom_tree_merge_probs(av1_ext_partition_tree, pre_fc->partition_prob[i],
counts->partition[i], fc->partition_prob[i]);
#else
for (i = 0; i < PARTITION_CONTEXTS; i++)
- vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+ aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
counts->partition[i], fc->partition_prob[i]);
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_INTRA
for (i = 0; i < PLANE_TYPES; ++i) {
- fc->ext_intra_probs[i] = vp10_mode_mv_merge_probs(
- pre_fc->ext_intra_probs[i], counts->ext_intra[i]);
+ fc->ext_intra_probs[i] = av1_mode_mv_merge_probs(pre_fc->ext_intra_probs[i],
+ counts->ext_intra[i]);
}
for (i = 0; i < INTRA_FILTERS + 1; ++i)
- vpx_tree_merge_probs(vp10_intra_filter_tree, pre_fc->intra_filter_probs[i],
+ aom_tree_merge_probs(av1_intra_filter_tree, pre_fc->intra_filter_probs[i],
counts->intra_filter[i], fc->intra_filter_probs[i]);
#endif // CONFIG_EXT_INTRA
}
@@ -1579,13 +1578,13 @@
lf->mode_deltas[1] = 0;
}
-void vp10_setup_past_independence(VP10_COMMON *cm) {
+void av1_setup_past_independence(AV1_COMMON *cm) {
// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
struct loopfilter *const lf = &cm->lf;
int i;
- vp10_clearall_segfeatures(&cm->seg);
+ av1_clearall_segfeatures(&cm->seg);
cm->seg.abs_delta = SEGMENT_DELTADATA;
if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
@@ -1595,8 +1594,8 @@
memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
// Reset the mode ref deltas for loop filter
- vp10_zero(lf->last_ref_deltas);
- vp10_zero(lf->last_mode_deltas);
+ av1_zero(lf->last_ref_deltas);
+ av1_zero(lf->last_mode_deltas);
set_default_lf_deltas(lf);
// To force update of the sharpness
@@ -1608,9 +1607,9 @@
}
#endif // CONFIG_LOOP_RESTORATION
- vp10_default_coef_probs(cm);
+ av1_default_coef_probs(cm);
init_mode_probs(cm->fc);
- vp10_init_mv_probs(cm);
+ av1_init_mv_probs(cm);
cm->fc->initialized = 1;
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
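Note: the aom_tree_index tables renamed throughout this file
(av1_partition_tree, av1_intra_mode_tree, av1_palette_size_tree, ...) all
share one layout: each internal node occupies two consecutive entries (the
"0" and "1" branches), a non-negative entry holds the offset of the next
node pair, and a negative entry is a leaf storing the negated token value.
The sketch below walks the same initializer as av1_partition_tree; it is
illustrative only, since the real decoder draws each branch bit from the
arithmetic decoder using one aom_prob per internal node, and the
aom_tree_merge_probs() calls above walk the identical structure when
re-estimating those node probabilities from leaf counts.

#include <stdio.h>

typedef int aom_tree_index;

enum { PARTITION_NONE, PARTITION_HORZ, PARTITION_VERT, PARTITION_SPLIT };

/* Same initializer as av1_partition_tree in the diff above. */
static const aom_tree_index partition_tree[6] = {
  -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
};

/* Follow branch bits from the root until a leaf (entry <= 0) is reached.
 * Entry 0 doubles as the leaf for token 0, which is why the loop tests
 * "> 0" rather than ">= 0". */
static int read_tree(const aom_tree_index *tree, const int *bits) {
  aom_tree_index i = 0;
  int k = 0;
  while ((i = tree[i + bits[k++]]) > 0) {
  }
  return -i; /* leaves store the negated token */
}

int main(void) {
  const int none[] = { 0 };        /* one bit:    PARTITION_NONE  */
  const int split[] = { 1, 1, 1 }; /* three bits: PARTITION_SPLIT */
  printf("%d %d\n", read_tree(partition_tree, none),
         read_tree(partition_tree, split));
  return 0; /* prints "0 3" */
}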
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index 4616aa2..e437b3f 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ENTROPYMODE_H_
-#define VP10_COMMON_ENTROPYMODE_H_
+#ifndef AV1_COMMON_ENTROPYMODE_H_
+#define AV1_COMMON_ENTROPYMODE_H_
#include "av1/common/entropy.h"
#include "av1/common/entropymv.h"
#include "av1/common/filter.h"
#include "av1/common/seg_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
#ifdef __cplusplus
extern "C" {
@@ -36,7 +36,7 @@
#define PALETTE_Y_MODE_CONTEXTS 3
#define PALETTE_MAX_BLOCK_SIZE (64 * 64)
-struct VP10Common;
+struct AV1Common;
struct seg_counts {
unsigned int tree_total[MAX_SEGMENTS];
@@ -45,58 +45,58 @@
};
typedef struct frame_contexts {
- vpx_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
- vpx_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+ aom_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
+ aom_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
#if CONFIG_EXT_PARTITION_TYPES
- vpx_prob partition_prob[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1];
+ aom_prob partition_prob[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1];
#else
- vpx_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+ aom_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
#endif
- vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
+ av1_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
#if CONFIG_ANS
coeff_cdf_model coef_cdfs[TX_SIZES][PLANE_TYPES];
#endif
- vpx_prob
+ aom_prob
switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS -
1];
#if CONFIG_REF_MV
- vpx_prob newmv_prob[NEWMV_MODE_CONTEXTS];
- vpx_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
- vpx_prob refmv_prob[REFMV_MODE_CONTEXTS];
- vpx_prob drl_prob[DRL_MODE_CONTEXTS];
+ aom_prob newmv_prob[NEWMV_MODE_CONTEXTS];
+ aom_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
+ aom_prob refmv_prob[REFMV_MODE_CONTEXTS];
+ aom_prob drl_prob[DRL_MODE_CONTEXTS];
#if CONFIG_EXT_INTER
- vpx_prob new2mv_prob;
+ aom_prob new2mv_prob;
#endif // CONFIG_EXT_INTER
#endif // CONFIG_REF_MV
- vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
+ aom_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
#if CONFIG_EXT_INTER
- vpx_prob
+ aom_prob
inter_compound_mode_probs[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1];
- vpx_prob interintra_prob[BLOCK_SIZE_GROUPS];
- vpx_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
- vpx_prob wedge_interintra_prob[BLOCK_SIZES];
- vpx_prob wedge_interinter_prob[BLOCK_SIZES];
+ aom_prob interintra_prob[BLOCK_SIZE_GROUPS];
+ aom_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
+ aom_prob wedge_interintra_prob[BLOCK_SIZES];
+ aom_prob wedge_interinter_prob[BLOCK_SIZES];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
- vpx_prob motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1];
+ aom_prob motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1];
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
- vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
- vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS];
- vpx_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS - 1];
+ aom_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+ aom_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+ aom_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS - 1];
#if CONFIG_EXT_REFS
- vpx_prob comp_ref_prob[REF_CONTEXTS][FWD_REFS - 1];
- vpx_prob comp_bwdref_prob[REF_CONTEXTS][BWD_REFS - 1];
+ aom_prob comp_ref_prob[REF_CONTEXTS][FWD_REFS - 1];
+ aom_prob comp_bwdref_prob[REF_CONTEXTS][BWD_REFS - 1];
#else
- vpx_prob comp_ref_prob[REF_CONTEXTS][COMP_REFS - 1];
+ aom_prob comp_ref_prob[REF_CONTEXTS][COMP_REFS - 1];
#endif // CONFIG_EXT_REFS
- vpx_prob tx_size_probs[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1];
+ aom_prob tx_size_probs[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1];
#if CONFIG_VAR_TX
- vpx_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
+ aom_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
#endif
- vpx_prob skip_probs[SKIP_CONTEXTS];
+ aom_prob skip_probs[SKIP_CONTEXTS];
#if CONFIG_REF_MV
nmv_context nmvc[NMV_CONTEXTS];
#else
@@ -104,24 +104,24 @@
#endif
int initialized;
#if CONFIG_EXT_TX
- vpx_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
- vpx_prob
+ aom_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
+ aom_prob
intra_ext_tx_prob[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES][TX_TYPES -
1];
#else
- vpx_prob intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1];
- vpx_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1];
+ aom_prob intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1];
+ aom_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1];
#endif // CONFIG_EXT_TX
#if CONFIG_SUPERTX
- vpx_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES];
+ aom_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES];
#endif // CONFIG_SUPERTX
struct segmentation_probs seg;
#if CONFIG_EXT_INTRA
- vpx_prob ext_intra_probs[PLANE_TYPES];
- vpx_prob intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1];
+ aom_prob ext_intra_probs[PLANE_TYPES];
+ aom_prob intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1];
#endif // CONFIG_EXT_INTRA
#if CONFIG_GLOBAL_MOTION
- vpx_prob global_motion_types_prob[GLOBAL_MOTION_TYPES - 1];
+ aom_prob global_motion_types_prob[GLOBAL_MOTION_TYPES - 1];
#endif // CONFIG_GLOBAL_MOTION
} FRAME_CONTEXT;
@@ -136,7 +136,7 @@
#else
unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES];
#endif
- vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
+ av1_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
unsigned int
eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
unsigned int
@@ -207,61 +207,60 @@
#endif // CONFIG_EXT_INTRA
} FRAME_COUNTS;
-extern const vpx_prob
- vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
-extern const vpx_prob vp10_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
- [PALETTE_Y_MODE_CONTEXTS];
-extern const vpx_prob vp10_default_palette_uv_mode_prob[2];
-extern const vpx_prob
- vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
-extern const vpx_prob
- vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
-extern const vpx_prob vp10_default_palette_y_color_prob
+extern const aom_prob
+ av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+extern const aom_prob av1_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
+ [PALETTE_Y_MODE_CONTEXTS];
+extern const aom_prob av1_default_palette_uv_mode_prob[2];
+extern const aom_prob
+ av1_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+extern const aom_prob
+ av1_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+extern const aom_prob av1_default_palette_y_color_prob
[PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
-extern const vpx_prob vp10_default_palette_uv_color_prob
+extern const aom_prob av1_default_palette_uv_color_prob
[PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
-extern const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
-extern const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
+extern const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
+extern const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)];
#if CONFIG_EXT_INTER
-extern const vpx_tree_index
- vp10_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
-extern const vpx_tree_index
- vp10_inter_compound_mode_tree[TREE_SIZE(INTER_COMPOUND_MODES)];
+extern const aom_tree_index
+ av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
+extern const aom_tree_index
+ av1_inter_compound_mode_tree[TREE_SIZE(INTER_COMPOUND_MODES)];
#endif // CONFIG_EXT_INTER
-extern const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
+extern const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)];
#if CONFIG_EXT_PARTITION_TYPES
-extern const vpx_tree_index
- vp10_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)];
+extern const aom_tree_index
+ av1_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)];
#endif
-extern const vpx_tree_index
- vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
-extern const vpx_tree_index vp10_palette_size_tree[TREE_SIZE(PALETTE_SIZES)];
-extern const vpx_tree_index
- vp10_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)];
-extern const vpx_tree_index
- vp10_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)];
+extern const aom_tree_index
+ av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
+extern const aom_tree_index av1_palette_size_tree[TREE_SIZE(PALETTE_SIZES)];
+extern const aom_tree_index
+ av1_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)];
+extern const aom_tree_index av1_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)];
#if CONFIG_EXT_INTRA
-extern const vpx_tree_index vp10_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)];
+extern const aom_tree_index av1_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)];
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_TX
-extern const vpx_tree_index
- vp10_ext_tx_inter_tree[EXT_TX_SETS_INTER][TREE_SIZE(TX_TYPES)];
-extern const vpx_tree_index
- vp10_ext_tx_intra_tree[EXT_TX_SETS_INTRA][TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index
+ av1_ext_tx_inter_tree[EXT_TX_SETS_INTER][TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index
+ av1_ext_tx_intra_tree[EXT_TX_SETS_INTRA][TREE_SIZE(TX_TYPES)];
#else
-extern const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)];
#endif // CONFIG_EXT_TX
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
-extern const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)];
+extern const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)];
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
-void vp10_setup_past_independence(struct VP10Common *cm);
+void av1_setup_past_independence(struct AV1Common *cm);
-void vp10_adapt_intra_frame_probs(struct VP10Common *cm);
-void vp10_adapt_inter_frame_probs(struct VP10Common *cm);
+void av1_adapt_intra_frame_probs(struct AV1Common *cm);
+void av1_adapt_inter_frame_probs(struct AV1Common *cm);
-static INLINE int vp10_ceil_log2(int n) {
+static INLINE int av1_ceil_log2(int n) {
int i = 1, p = 2;
while (p < n) {
i++;
@@ -270,11 +269,11 @@
return i;
}
-int vp10_get_palette_color_context(const uint8_t *color_map, int cols, int r,
- int c, int n, int *color_order);
+int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
+ int c, int n, int *color_order);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_ENTROPYMODE_H_
+#endif // AV1_COMMON_ENTROPYMODE_H_
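
A quick check of the av1_ceil_log2() helper renamed above: it returns ceil(log2(n)) for n >= 2 and bottoms out at 1 for smaller inputs (it never returns 0). A standalone sketch mirroring the body shown in the hunk:

    #include <assert.h>

    /* Mirror of av1_ceil_log2() above, reproduced for illustration. */
    static int ceil_log2(int n) {
      int i = 1, p = 2;
      while (p < n) {
        i++;
        p <<= 1;
      }
      return i;
    }

    int main(void) {
      assert(ceil_log2(2) == 1);
      assert(ceil_log2(3) == 2); /* rounds up: 2^2 = 4 >= 3 */
      assert(ceil_log2(8) == 3);
      assert(ceil_log2(9) == 4);
      assert(ceil_log2(1) == 1); /* note: not 0; callers rely on n >= 2 */
      return 0;
    }
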
diff --git a/av1/common/entropymv.c b/av1/common/entropymv.c
index f3dba3f..5abc252 100644
--- a/av1/common/entropymv.c
+++ b/av1/common/entropymv.c
@@ -14,12 +14,12 @@
// Integer pel reference mv threshold for use of high-precision 1/8 mv
#define COMPANDED_MVREF_THRESH 8
-const vpx_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
+const aom_tree_index av1_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
-MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
};
/* clang-format off */
-const vpx_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
+const aom_tree_index av1_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
-MV_CLASS_0, 2,
-MV_CLASS_1, 4,
6, 8,
@@ -33,12 +33,12 @@
};
/* clang-format on */
-const vpx_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
+const aom_tree_index av1_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
-0, -1,
};
-const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2, -1,
- 4, -2, -3 };
+const aom_tree_index av1_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2, -1,
+ 4, -2, -3 };
static const nmv_context default_nmv_context = {
#if CONFIG_REF_MV
@@ -115,12 +115,12 @@
};
#if CONFIG_GLOBAL_MOTION
-const vpx_tree_index
- vp10_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)] = {
+const aom_tree_index
+ av1_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)] = {
-GLOBAL_ZERO, 2, -GLOBAL_TRANSLATION, 4, -GLOBAL_ROTZOOM, -GLOBAL_AFFINE
};
-static const vpx_prob default_global_motion_types_prob[GLOBAL_MOTION_TYPES -
+static const aom_prob default_global_motion_types_prob[GLOBAL_MOTION_TYPES -
1] = { 224, 128, 128 };
#endif // CONFIG_GLOBAL_MOTION
@@ -128,7 +128,7 @@
return c ? CLASS0_SIZE << (c + 2) : 0;
}
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset) {
const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096)
? MV_CLASS_10
: (MV_CLASS_TYPE)log_in_base_2[z >> 3];
@@ -138,7 +138,7 @@
// TODO(jingning): This idle function is intentionally left as is for
// experimental purposes.
-int vp10_use_mv_hp(const MV *ref) {
+int av1_use_mv_hp(const MV *ref) {
(void)ref;
return 1;
}
@@ -151,7 +151,7 @@
comp_counts->sign[s] += incr;
z = (s ? -v : v) - 1; /* magnitude - 1 */
- c = vp10_get_mv_class(z, &o);
+ c = av1_get_mv_class(z, &o);
comp_counts->classes[c] += incr;
d = (o >> 3); /* int mv data */
@@ -171,9 +171,9 @@
}
}
-void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
+void av1_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
if (counts != NULL) {
- const MV_JOINT_TYPE j = vp10_get_mv_joint(mv);
+ const MV_JOINT_TYPE j = av1_get_mv_joint(mv);
#if CONFIG_REF_MV
++counts->zero_rmv[j == MV_JOINT_ZERO];
@@ -189,7 +189,7 @@
}
}
-void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
+void av1_adapt_mv_probs(AV1_COMMON *cm, int allow_hp) {
int i, j;
#if CONFIG_REF_MV
int idx;
@@ -199,10 +199,10 @@
&cm->frame_contexts[cm->frame_context_idx].nmvc[idx];
const nmv_context_counts *counts = &cm->counts.mv[idx];
- vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+ aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
fc->joints);
#if CONFIG_REF_MV
- fc->zero_rmv = vp10_mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
+ fc->zero_rmv = av1_mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
#endif
for (i = 0; i < 2; ++i) {
@@ -210,25 +210,25 @@
const nmv_component *pre_comp = &pre_fc->comps[i];
const nmv_component_counts *c = &counts->comps[i];
- comp->sign = vp10_mode_mv_merge_probs(pre_comp->sign, c->sign);
- vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+ comp->sign = av1_mode_mv_merge_probs(pre_comp->sign, c->sign);
+ aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
comp->classes);
- vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+ aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
comp->class0);
for (j = 0; j < MV_OFFSET_BITS; ++j)
- comp->bits[j] = vp10_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+ comp->bits[j] = av1_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
for (j = 0; j < CLASS0_SIZE; ++j)
- vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+ aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
c->class0_fp[j], comp->class0_fp[j]);
- vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+ aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
if (allow_hp) {
comp->class0_hp =
- vp10_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
- comp->hp = vp10_mode_mv_merge_probs(pre_comp->hp, c->hp);
+ av1_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+ comp->hp = av1_mode_mv_merge_probs(pre_comp->hp, c->hp);
}
}
}
@@ -237,7 +237,7 @@
const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
const nmv_context_counts *counts = &cm->counts.mv;
- vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+ aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
fc->joints);
for (i = 0; i < 2; ++i) {
@@ -245,31 +245,31 @@
const nmv_component *pre_comp = &pre_fc->comps[i];
const nmv_component_counts *c = &counts->comps[i];
- comp->sign = vp10_mode_mv_merge_probs(pre_comp->sign, c->sign);
- vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+ comp->sign = av1_mode_mv_merge_probs(pre_comp->sign, c->sign);
+ aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
comp->classes);
- vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+ aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
comp->class0);
for (j = 0; j < MV_OFFSET_BITS; ++j)
- comp->bits[j] = vp10_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+ comp->bits[j] = av1_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
for (j = 0; j < CLASS0_SIZE; ++j)
- vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+ aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
c->class0_fp[j], comp->class0_fp[j]);
- vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+ aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
if (allow_hp) {
comp->class0_hp =
- vp10_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
- comp->hp = vp10_mode_mv_merge_probs(pre_comp->hp, c->hp);
+ av1_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+ comp->hp = av1_mode_mv_merge_probs(pre_comp->hp, c->hp);
}
}
#endif
}
-void vp10_init_mv_probs(VP10_COMMON *cm) {
+void av1_init_mv_probs(AV1_COMMON *cm) {
#if CONFIG_REF_MV
int i;
for (i = 0; i < NMV_CONTEXTS; ++i) cm->fc->nmvc[i] = default_nmv_context;
@@ -277,6 +277,6 @@
cm->fc->nmvc = default_nmv_context;
#endif
#if CONFIG_GLOBAL_MOTION
- vp10_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
+ av1_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
#endif // CONFIG_GLOBAL_MOTION
}
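
The tree tables above keep libvpx's encoding: a non-positive entry is a negated leaf symbol, and a positive entry is the index of the node's zero-branch child (the one-branch child sits at index + 1). A minimal traversal sketch, assuming aom_tree_index stays libvpx's 8-bit type and with read_bit() as a hypothetical stand-in for the arithmetic decoder:

    #include <stdint.h>

    /* Decodes one symbol from a tree such as av1_mv_joint_tree. read_bit()
     * is a hypothetical stand-in for the real bool decoder. */
    static int read_tree_symbol(const int8_t *tree, int (*read_bit)(void *),
                                void *ctx) {
      int8_t i = 0;
      do {
        i = tree[i + read_bit(ctx)]; /* 0-branch child, or +1 for 1-branch */
      } while (i > 0);
      return -i; /* leaf entries store the negated symbol value */
    }

Reading av1_mv_joint_tree this way yields MV_JOINT_ZERO after a single 0 bit and MV_JOINT_HNZVNZ after three 1 bits.
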
diff --git a/av1/common/entropymv.h b/av1/common/entropymv.h
index c809a67..c6e0855 100644
--- a/av1/common/entropymv.h
+++ b/av1/common/entropymv.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ENTROPYMV_H_
-#define VP10_COMMON_ENTROPYMV_H_
+#ifndef AV1_COMMON_ENTROPYMV_H_
+#define AV1_COMMON_ENTROPYMV_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/prob.h"
@@ -21,12 +21,12 @@
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
-void vp10_init_mv_probs(struct VP10Common *cm);
+void av1_init_mv_probs(struct AV1Common *cm);
-void vp10_adapt_mv_probs(struct VP10Common *cm, int usehp);
-int vp10_use_mv_hp(const MV *ref);
+void av1_adapt_mv_probs(struct AV1Common *cm, int usehp);
+int av1_use_mv_hp(const MV *ref);
#define MV_UPDATE_PROB 252
@@ -76,31 +76,31 @@
#define MV_UPP ((1 << MV_IN_USE_BITS) - 1)
#define MV_LOW (-(1 << MV_IN_USE_BITS))
-extern const vpx_tree_index vp10_mv_joint_tree[];
-extern const vpx_tree_index vp10_mv_class_tree[];
-extern const vpx_tree_index vp10_mv_class0_tree[];
-extern const vpx_tree_index vp10_mv_fp_tree[];
+extern const aom_tree_index av1_mv_joint_tree[];
+extern const aom_tree_index av1_mv_class_tree[];
+extern const aom_tree_index av1_mv_class0_tree[];
+extern const aom_tree_index av1_mv_fp_tree[];
typedef struct {
- vpx_prob sign;
- vpx_prob classes[MV_CLASSES - 1];
- vpx_prob class0[CLASS0_SIZE - 1];
- vpx_prob bits[MV_OFFSET_BITS];
- vpx_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
- vpx_prob fp[MV_FP_SIZE - 1];
- vpx_prob class0_hp;
- vpx_prob hp;
+ aom_prob sign;
+ aom_prob classes[MV_CLASSES - 1];
+ aom_prob class0[CLASS0_SIZE - 1];
+ aom_prob bits[MV_OFFSET_BITS];
+ aom_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
+ aom_prob fp[MV_FP_SIZE - 1];
+ aom_prob class0_hp;
+ aom_prob hp;
} nmv_component;
typedef struct {
- vpx_prob joints[MV_JOINTS - 1];
+ aom_prob joints[MV_JOINTS - 1];
#if CONFIG_REF_MV
- vpx_prob zero_rmv;
+ aom_prob zero_rmv;
#endif
nmv_component comps[2];
} nmv_context;
-static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) {
+static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {
if (mv->row == 0) {
return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
} else {
@@ -108,7 +108,7 @@
}
}
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset);
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset);
typedef struct {
unsigned int sign[2];
@@ -129,15 +129,15 @@
nmv_component_counts comps[2];
} nmv_context_counts;
-void vp10_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
+void av1_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
#if CONFIG_GLOBAL_MOTION
-extern const vpx_tree_index
- vp10_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)];
+extern const aom_tree_index
+ av1_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)];
#endif // CONFIG_GLOBAL_MOTION
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_ENTROPYMV_H_
+#endif // AV1_COMMON_ENTROPYMV_H_
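
For orientation, the four MV_JOINT_* classes partition motion vectors by which component is nonzero ("HNZVZ" reads "horizontal nonzero, vertical zero", and so on). A self-contained sketch of the same classification av1_get_mv_joint() performs, under the assumption that MV carries int16_t row/col as in mv.h:

    #include <stdint.h>

    typedef struct { int16_t row, col; } Mv; /* mirrors MV in mv.h */
    typedef enum { J_ZERO, J_HNZVZ, J_HZVNZ, J_HNZVNZ } Joint;

    static Joint get_joint(const Mv *mv) {
      if (mv->row == 0) return mv->col == 0 ? J_ZERO : J_HNZVZ;
      return mv->col == 0 ? J_HZVNZ : J_HNZVNZ;
    }
    /* e.g. (row 0, col 3) -> J_HNZVZ: purely horizontal motion. */
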
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 8cdec8e..899c8b9 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ENUMS_H_
-#define VP10_COMMON_ENUMS_H_
+#ifndef AV1_COMMON_ENUMS_H_
+#define AV1_COMMON_ENUMS_H_
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -211,20 +211,20 @@
#endif // CONFIG_EXT_TX
typedef enum {
- VPX_LAST_FLAG = 1 << 0,
+ AOM_LAST_FLAG = 1 << 0,
#if CONFIG_EXT_REFS
- VPX_LAST2_FLAG = 1 << 1,
- VPX_LAST3_FLAG = 1 << 2,
- VPX_GOLD_FLAG = 1 << 3,
- VPX_BWD_FLAG = 1 << 4,
- VPX_ALT_FLAG = 1 << 5,
- VPX_REFFRAME_ALL = (1 << 6) - 1
+ AOM_LAST2_FLAG = 1 << 1,
+ AOM_LAST3_FLAG = 1 << 2,
+ AOM_GOLD_FLAG = 1 << 3,
+ AOM_BWD_FLAG = 1 << 4,
+ AOM_ALT_FLAG = 1 << 5,
+ AOM_REFFRAME_ALL = (1 << 6) - 1
#else
- VPX_GOLD_FLAG = 1 << 1,
- VPX_ALT_FLAG = 1 << 2,
- VPX_REFFRAME_ALL = (1 << 3) - 1
+ AOM_GOLD_FLAG = 1 << 1,
+ AOM_ALT_FLAG = 1 << 2,
+ AOM_REFFRAME_ALL = (1 << 3) - 1
#endif // CONFIG_EXT_REFS
-} VPX_REFFRAME;
+} AOM_REFFRAME;
typedef enum { PLANE_TYPE_Y = 0, PLANE_TYPE_UV = 1, PLANE_TYPES } PLANE_TYPE;
@@ -435,4 +435,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ENUMS_H_
+#endif // AV1_COMMON_ENUMS_H_
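
The renamed AOM_REFFRAME values remain a plain bitmask, so call sites combine and test them bitwise; AOM_REFFRAME_ALL is simply every individual flag set. A small sketch using the non-CONFIG_EXT_REFS values above (local names are illustrative):

    /* Values from the non-EXT_REFS branch of AOM_REFFRAME above. */
    enum { LAST = 1 << 0, GOLD = 1 << 1, ALT = 1 << 2, ALL = (1 << 3) - 1 };

    /* Restrict prediction to LAST and ALTREF: */
    static int last_and_alt(void) { return ALL & ~GOLD; } /* == LAST | ALT */

    static int uses_golden(int ref_flags) { return (ref_flags & GOLD) != 0; }
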
diff --git a/av1/common/filter.c b/av1/common/filter.c
index 46eca5d..4881642 100644
--- a/av1/common/filter.c
+++ b/av1/common/filter.c
@@ -186,7 +186,7 @@
#endif // CONFIG_EXT_INTERP
#if CONFIG_EXT_INTRA
-const InterpKernel *vp10_intra_filter_kernels[INTRA_FILTERS] = {
+const InterpKernel *av1_intra_filter_kernels[INTRA_FILTERS] = {
bilinear_filters, // INTRA_FILTER_LINEAR
sub_pel_filters_8, // INTRA_FILTER_8TAP
sub_pel_filters_8sharp, // INTRA_FILTER_8TAP_SHARP
@@ -196,7 +196,7 @@
#if CONFIG_EXT_INTERP
static const InterpFilterParams
- vp10_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
+ av1_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
{ (const int16_t *)sub_pel_filters_8, SUBPEL_TAPS, SUBPEL_SHIFTS },
{ (const int16_t *)sub_pel_filters_8smooth, SUBPEL_TAPS, SUBPEL_SHIFTS },
{ (const int16_t *)sub_pel_filters_10sharp, 10, SUBPEL_SHIFTS },
@@ -206,7 +206,7 @@
};
#else
static const InterpFilterParams
- vp10_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
+ av1_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
{ (const int16_t *)sub_pel_filters_8, SUBPEL_TAPS, SUBPEL_SHIFTS },
{ (const int16_t *)sub_pel_filters_8smooth, SUBPEL_TAPS, SUBPEL_SHIFTS },
{ (const int16_t *)sub_pel_filters_8sharp, SUBPEL_TAPS, SUBPEL_SHIFTS },
@@ -215,32 +215,31 @@
#endif // CONFIG_EXT_INTERP
#if USE_TEMPORALFILTER_12TAP
-static const InterpFilterParams vp10_interp_temporalfilter_12tap = {
+static const InterpFilterParams av1_interp_temporalfilter_12tap = {
(const int16_t *)sub_pel_filters_temporalfilter_12, 12, SUBPEL_SHIFTS
};
#endif // USE_TEMPORALFILTER_12TAP
-InterpFilterParams vp10_get_interp_filter_params(
+InterpFilterParams av1_get_interp_filter_params(
const INTERP_FILTER interp_filter) {
#if USE_TEMPORALFILTER_12TAP
if (interp_filter == TEMPORALFILTER_12TAP)
- return vp10_interp_temporalfilter_12tap;
+ return av1_interp_temporalfilter_12tap;
#endif // USE_TEMPORALFILTER_12TAP
- return vp10_interp_filter_params_list[interp_filter];
+ return av1_interp_filter_params_list[interp_filter];
}
-const int16_t *vp10_get_interp_filter_kernel(
- const INTERP_FILTER interp_filter) {
+const int16_t *av1_get_interp_filter_kernel(const INTERP_FILTER interp_filter) {
#if USE_TEMPORALFILTER_12TAP
if (interp_filter == TEMPORALFILTER_12TAP)
- return vp10_interp_temporalfilter_12tap.filter_ptr;
+ return av1_interp_temporalfilter_12tap.filter_ptr;
#endif // USE_TEMPORALFILTER_12TAP
- return (const int16_t *)vp10_interp_filter_params_list[interp_filter]
+ return (const int16_t *)av1_interp_filter_params_list[interp_filter]
.filter_ptr;
}
-SubpelFilterCoeffs vp10_get_subpel_filter_signal_dir(const InterpFilterParams p,
- int index) {
+SubpelFilterCoeffs av1_get_subpel_filter_signal_dir(const InterpFilterParams p,
+ int index) {
#if CONFIG_EXT_INTERP && HAVE_SSSE3
if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
return &sub_pel_filters_12sharp_signal_dir[index][0];
@@ -259,7 +258,7 @@
return NULL;
}
-SubpelFilterCoeffs vp10_get_subpel_filter_ver_signal_dir(
+SubpelFilterCoeffs av1_get_subpel_filter_ver_signal_dir(
const InterpFilterParams p, int index) {
#if CONFIG_EXT_INTERP && HAVE_SSSE3
if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
@@ -279,8 +278,8 @@
return NULL;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-HbdSubpelFilterCoeffs vp10_hbd_get_subpel_filter_ver_signal_dir(
+#if CONFIG_AOM_HIGHBITDEPTH
+HbdSubpelFilterCoeffs av1_hbd_get_subpel_filter_ver_signal_dir(
const InterpFilterParams p, int index) {
#if CONFIG_EXT_INTERP && HAVE_SSE4_1
if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
diff --git a/av1/common/filter.h b/av1/common/filter.h
index 39fad23..c5a8521 100644
--- a/av1/common/filter.h
+++ b/av1/common/filter.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_FILTER_H_
-#define VP10_COMMON_FILTER_H_
+#ifndef AV1_COMMON_FILTER_H_
+#define AV1_COMMON_FILTER_H_
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
@@ -65,7 +65,7 @@
INTRA_FILTERS,
} INTRA_FILTER;
-extern const InterpKernel *vp10_intra_filter_kernels[INTRA_FILTERS];
+extern const InterpKernel *av1_intra_filter_kernels[INTRA_FILTERS];
#endif // CONFIG_EXT_INTRA
typedef struct InterpFilterParams {
@@ -74,26 +74,26 @@
uint16_t subpel_shifts;
} InterpFilterParams;
-InterpFilterParams vp10_get_interp_filter_params(
+InterpFilterParams av1_get_interp_filter_params(
const INTERP_FILTER interp_filter);
-const int16_t *vp10_get_interp_filter_kernel(const INTERP_FILTER interp_filter);
+const int16_t *av1_get_interp_filter_kernel(const INTERP_FILTER interp_filter);
-static INLINE const int16_t *vp10_get_interp_filter_subpel_kernel(
+static INLINE const int16_t *av1_get_interp_filter_subpel_kernel(
const InterpFilterParams filter_params, const int subpel) {
return filter_params.filter_ptr + filter_params.taps * subpel;
}
-static INLINE int vp10_is_interpolating_filter(
+static INLINE int av1_is_interpolating_filter(
const INTERP_FILTER interp_filter) {
- const InterpFilterParams ip = vp10_get_interp_filter_params(interp_filter);
+ const InterpFilterParams ip = av1_get_interp_filter_params(interp_filter);
return (ip.filter_ptr[ip.taps / 2 - 1] == 128);
}
#if USE_TEMPORALFILTER_12TAP
extern const int8_t sub_pel_filters_temporalfilter_12_signal_dir[15][2][16];
extern const int8_t sub_pel_filters_temporalfilter_12_ver_signal_dir[15][6][16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
extern const int16_t
sub_pel_filters_temporalfilter_12_highbd_ver_signal_dir[15][6][8];
#endif
@@ -104,24 +104,24 @@
extern const int8_t sub_pel_filters_10sharp_signal_dir[15][2][16];
extern const int8_t sub_pel_filters_12sharp_ver_signal_dir[15][6][16];
extern const int8_t sub_pel_filters_10sharp_ver_signal_dir[15][6][16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
extern const int16_t sub_pel_filters_12sharp_highbd_ver_signal_dir[15][6][8];
extern const int16_t sub_pel_filters_10sharp_highbd_ver_signal_dir[15][6][8];
#endif
#endif
typedef const int8_t (*SubpelFilterCoeffs)[16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef const int16_t (*HbdSubpelFilterCoeffs)[8];
#endif
-SubpelFilterCoeffs vp10_get_subpel_filter_signal_dir(const InterpFilterParams p,
- int index);
+SubpelFilterCoeffs av1_get_subpel_filter_signal_dir(const InterpFilterParams p,
+ int index);
-SubpelFilterCoeffs vp10_get_subpel_filter_ver_signal_dir(
+SubpelFilterCoeffs av1_get_subpel_filter_ver_signal_dir(
const InterpFilterParams p, int index);
-#if CONFIG_VP9_HIGHBITDEPTH
-HbdSubpelFilterCoeffs vp10_hbd_get_subpel_filter_ver_signal_dir(
+#if CONFIG_AOM_HIGHBITDEPTH
+HbdSubpelFilterCoeffs av1_hbd_get_subpel_filter_ver_signal_dir(
const InterpFilterParams p, int index);
#endif
@@ -129,4 +129,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_FILTER_H_
+#endif // AV1_COMMON_FILTER_H_
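
The subpel-kernel helpers above are flat-table arithmetic: filter_ptr addresses subpel_shifts consecutive kernels of taps coefficients each, so phase k begins at filter_ptr + taps * k, and a filter is "interpolating" when its phase-0 kernel is a unit impulse (center tap 128, i.e. 1.0 in Q7), leaving integer-pel samples untouched. A sketch under those assumptions, with a mirrored struct standing in for InterpFilterParams:

    #include <stdint.h>

    typedef struct {
      const int16_t *filter_ptr; /* subpel_shifts x taps, row-major */
      uint16_t taps;
      uint16_t subpel_shifts;
    } FilterParams; /* mirrors InterpFilterParams */

    static const int16_t *subpel_kernel(FilterParams p, int subpel) {
      return p.filter_ptr + p.taps * subpel; /* row `subpel` of the table */
    }

    static int is_interpolating(FilterParams p) {
      /* center tap of phase 0 equals 128 (1.0 in Q7) */
      return subpel_kernel(p, 0)[p.taps / 2 - 1] == 128;
    }
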
diff --git a/av1/common/frame_buffers.c b/av1/common/frame_buffers.c
index 5c736a9..89f4e4f 100644
--- a/av1/common/frame_buffers.c
+++ b/av1/common/frame_buffers.c
@@ -11,34 +11,34 @@
#include <assert.h>
#include "av1/common/frame_buffers.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
assert(list != NULL);
- vp10_free_internal_frame_buffers(list);
+ av1_free_internal_frame_buffers(list);
list->num_internal_frame_buffers =
- VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS;
- list->int_fb = (InternalFrameBuffer *)vpx_calloc(
+ AOM_MAXIMUM_REF_BUFFERS + AOM_MAXIMUM_WORK_BUFFERS;
+ list->int_fb = (InternalFrameBuffer *)aom_calloc(
list->num_internal_frame_buffers, sizeof(*list->int_fb));
return (list->int_fb == NULL);
}
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list) {
int i;
assert(list != NULL);
for (i = 0; i < list->num_internal_frame_buffers; ++i) {
- vpx_free(list->int_fb[i].data);
+ aom_free(list->int_fb[i].data);
list->int_fb[i].data = NULL;
}
- vpx_free(list->int_fb);
+ aom_free(list->int_fb);
list->int_fb = NULL;
}
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
- vpx_codec_frame_buffer_t *fb) {
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
+ aom_codec_frame_buffer_t *fb) {
int i;
InternalFrameBufferList *const int_fb_list =
(InternalFrameBufferList *)cb_priv;
@@ -53,7 +53,7 @@
if (int_fb_list->int_fb[i].size < min_size) {
int_fb_list->int_fb[i].data =
- (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size);
+ (uint8_t *)aom_realloc(int_fb_list->int_fb[i].data, min_size);
if (!int_fb_list->int_fb[i].data) return -1;
// This memset is needed for fixing valgrind error from C loop filter
@@ -72,7 +72,7 @@
return 0;
}
-int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) {
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv;
(void)cb_priv;
if (int_fb) int_fb->in_use = 0;
diff --git a/av1/common/frame_buffers.h b/av1/common/frame_buffers.h
index 6667132..63253be 100644
--- a/av1/common/frame_buffers.h
+++ b/av1/common/frame_buffers.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_FRAME_BUFFERS_H_
-#define VP10_COMMON_FRAME_BUFFERS_H_
+#ifndef AV1_COMMON_FRAME_BUFFERS_H_
+#define AV1_COMMON_FRAME_BUFFERS_H_
-#include "aom/vpx_frame_buffer.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_frame_buffer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -30,24 +30,24 @@
} InternalFrameBufferList;
// Initializes |list|. Returns 0 on success.
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list);
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list);
// Frees any data allocated to the frame buffers.
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list);
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list);
// Callback used by libaom to request an external frame buffer. |cb_priv| is
// callback private data, which points to an InternalFrameBufferList.
// |min_size| is the minimum size in bytes needed to decode the next frame.
// |fb| is a pointer to the frame buffer.
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
- vpx_codec_frame_buffer_t *fb);
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
+ aom_codec_frame_buffer_t *fb);
// Callback used by libaom when there are no references to the frame buffer.
// |cb_priv| is not used. |fb| is a pointer to the frame buffer.
-int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb);
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_FRAME_BUFFERS_H_
+#endif // AV1_COMMON_FRAME_BUFFERS_H_
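
Taken together, the four functions above implement the external frame-buffer protocol: allocate the list once, hand the get/release callbacks to the decoder, and free the list when done. A usage sketch, assuming the decoder-side registration hook was renamed the same way as everything else here (aom_codec_set_frame_buffer_functions(), mirroring libvpx's vpx_codec_set_frame_buffer_functions(); `decoder` and `die()` come from the surrounding application):

    InternalFrameBufferList list = { 0, NULL };
    if (av1_alloc_internal_frame_buffers(&list)) die("alloc failed");
    /* Hypothetical registration call, by analogy with libvpx: */
    aom_codec_set_frame_buffer_functions(&decoder, av1_get_frame_buffer,
                                         av1_release_frame_buffer, &list);
    /* ... decode frames; libaom invokes the two callbacks as needed ... */
    av1_free_internal_frame_buffers(&list);
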
diff --git a/av1/common/idct.c b/av1/common/idct.c
index 83b44d5..536e346 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -10,19 +10,19 @@
#include <math.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/blockd.h"
#include "av1/common/enums.h"
#include "av1/common/idct.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
int get_tx_scale(const MACROBLOCKD *const xd, const TX_TYPE tx_type,
const TX_SIZE tx_size) {
(void)tx_type;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
return txsize_sqr_up_map[tx_size] == TX_32X32;
}
@@ -70,7 +70,7 @@
// Note overall scaling factor is 4 times orthogonal
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_iidtx4_c(const tran_low_t *input, tran_low_t *output,
int bd) {
int i;
@@ -113,10 +113,10 @@
inputhalf[i] =
HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[i] * Sqrt2), bd);
}
- vpx_highbd_idct16_c(inputhalf, output + 16, bd);
+ aom_highbd_idct16_c(inputhalf, output + 16, bd);
// Note overall scaling factor is 4 times orthogonal
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Inverse identity transform and add.
static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
@@ -177,7 +177,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void highbd_idst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step[4];
tran_high_t temp1, temp2;
@@ -255,7 +255,7 @@
}
void highbd_idst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
- // vp9_highbd_igentx16(input, output, bd, Tx16);
+ // av1_highbd_igentx16(input, output, bd, Tx16);
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
(void)bd;
@@ -474,11 +474,11 @@
default: assert(0); break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EXT_TX
-void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_4[] = {
{ idct4_c, idct4_c }, // DCT_DCT
{ iadst4_c, idct4_c }, // ADST_DCT
@@ -541,8 +541,8 @@
}
#if CONFIG_EXT_TX
-void vp10_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_4x8[] = {
{ idct8_c, idct4_c }, // DCT_DCT
{ iadst8_c, idct4_c }, // ADST_DCT
@@ -594,8 +594,8 @@
}
}
-void vp10_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_8x4[] = {
{ idct4_c, idct8_c }, // DCT_DCT
{ iadst4_c, idct8_c }, // ADST_DCT
@@ -647,8 +647,8 @@
}
}
-void vp10_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_8x16[] = {
{ idct16_c, idct8_c }, // DCT_DCT
{ iadst16_c, idct8_c }, // ADST_DCT
@@ -700,8 +700,8 @@
}
}
-void vp10_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_16x8[] = {
{ idct8_c, idct16_c }, // DCT_DCT
{ iadst8_c, idct16_c }, // ADST_DCT
@@ -753,8 +753,8 @@
}
}
-void vp10_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_16x32[] = {
{ idct32_c, idct16_c }, // DCT_DCT
{ ihalfright32_c, idct16_c }, // ADST_DCT
@@ -806,8 +806,8 @@
}
}
-void vp10_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_32x16[] = {
{ idct16_c, idct32_c }, // DCT_DCT
{ iadst16_c, idct32_c }, // ADST_DCT
@@ -860,8 +860,8 @@
}
#endif // CONFIG_EXT_TX
-void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_8[] = {
{ idct8_c, idct8_c }, // DCT_DCT
{ iadst8_c, idct8_c }, // ADST_DCT
@@ -923,8 +923,8 @@
}
}
-void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_16[] = {
{ idct16_c, idct16_c }, // DCT_DCT
{ iadst16_c, idct16_c }, // ADST_DCT
@@ -987,8 +987,8 @@
}
#if CONFIG_EXT_TX
-void vp10_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
- int stride, int tx_type) {
+void av1_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
static const transform_2d IHT_32[] = {
{ idct32_c, idct32_c }, // DCT_DCT
{ ihalfright32_c, idct32_c }, // ADST_DCT
@@ -1048,82 +1048,82 @@
#endif // CONFIG_EXT_TX
// idct
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob) {
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob) {
if (eob > 1)
- vpx_idct4x4_16_add(input, dest, stride);
+ aom_idct4x4_16_add(input, dest, stride);
else
- vpx_idct4x4_1_add(input, dest, stride);
+ aom_idct4x4_1_add(input, dest, stride);
}
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob) {
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob) {
if (eob > 1)
- vpx_iwht4x4_16_add(input, dest, stride);
+ aom_iwht4x4_16_add(input, dest, stride);
else
- vpx_iwht4x4_1_add(input, dest, stride);
+ aom_iwht4x4_1_add(input, dest, stride);
}
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob) {
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob) {
  // If eob is 1, then input[0] is already the reconstructed value and needs
  // no dequantization. The DC coefficient is counted in eobs, so eobs >= 1.
  // The calculation can be simplified when there are few non-zero DCT
  // coefficients; use eobs to decide which path to take.
- // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+ // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
// Combine that with code here.
if (eob == 1)
// DC only DCT coefficient
- vpx_idct8x8_1_add(input, dest, stride);
+ aom_idct8x8_1_add(input, dest, stride);
else if (eob <= 12)
- vpx_idct8x8_12_add(input, dest, stride);
+ aom_idct8x8_12_add(input, dest, stride);
else
- vpx_idct8x8_64_add(input, dest, stride);
+ aom_idct8x8_64_add(input, dest, stride);
}
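
The eob-driven dispatch above (and in the 16x16/32x32 variants that follow) exploits the fact that eob is one past the last nonzero coefficient in scan order: a small eob proves the trailing high-frequency coefficients are zero, so a reduced inverse transform that skips the all-zero rows produces an identical result for less work. Restated as a bare sketch (the wrapper name is illustrative):

    /* eob ("end of block") is one past the last nonzero coefficient in
     * scan order, so the cheapest sufficient inverse transform can be
     * picked with simple thresholds: */
    static void idct8x8_by_eob(const tran_low_t *input, uint8_t *dest,
                               int stride, int eob) {
      if (eob == 1)        /* DC only */
        aom_idct8x8_1_add(input, dest, stride);
      else if (eob <= 12)  /* low-frequency corner only */
        aom_idct8x8_12_add(input, dest, stride);
      else                 /* general case */
        aom_idct8x8_64_add(input, dest, stride);
    }
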
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob) {
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob) {
/* The calculation can be simplified if there are not many non-zero dct
* coefficients. Use eobs to separate different cases. */
if (eob == 1) /* DC only DCT coefficient. */
- vpx_idct16x16_1_add(input, dest, stride);
+ aom_idct16x16_1_add(input, dest, stride);
else if (eob <= 10)
- vpx_idct16x16_10_add(input, dest, stride);
+ aom_idct16x16_10_add(input, dest, stride);
else
- vpx_idct16x16_256_add(input, dest, stride);
+ aom_idct16x16_256_add(input, dest, stride);
}
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob) {
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob) {
if (eob == 1)
- vpx_idct32x32_1_add(input, dest, stride);
+ aom_idct32x32_1_add(input, dest, stride);
else if (eob <= 34)
// non-zero coeff only in upper-left 8x8
- vpx_idct32x32_34_add(input, dest, stride);
+ aom_idct32x32_34_add(input, dest, stride);
else
- vpx_idct32x32_1024_add(input, dest, stride);
+ aom_idct32x32_1024_add(input, dest, stride);
}
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type, int lossless) {
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type, int lossless) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_iwht4x4_add(input, dest, stride, eob);
+ av1_iwht4x4_add(input, dest, stride, eob);
return;
}
switch (tx_type) {
- case DCT_DCT: vp10_idct4x4_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct4x4_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+ case ADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
- case FLIPADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+ case FLIPADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
case V_DCT:
case H_DCT:
case V_ADST:
@@ -1131,7 +1131,7 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST only exists in C code
- vp10_iht4x4_16_add_c(input, dest, stride, tx_type);
+ av1_iht4x4_16_add_c(input, dest, stride, tx_type);
break;
case IDTX: inv_idtx_add_c(input, dest, stride, 4, tx_type); break;
#endif // CONFIG_EXT_TX
@@ -1140,56 +1140,56 @@
}
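
One detail worth noting at the 4x4 call sites: in lossless mode the bitstream is restricted to DCT_DCT, and the exactly invertible integer Walsh-Hadamard transform stands in for the DCT, which is what the assert in av1_inv_txfm_add_4x4() enforces. A hypothetical decode-side call, assuming the per-segment lossless flag layout vp10 used (xd->lossless[segment_id]); dqcoeff, dst, stride, eob, and tx_type come from the surrounding decode loop:

    /* Hypothetical call site: reconstruct one 4x4 block. */
    av1_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
                         xd->lossless[mbmi->segment_id]);
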
#if CONFIG_EXT_TX
-void vp10_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type) {
+ (void)eob;
+ av1_iht4x8_32_add(input, dest, stride, tx_type);
+}
+
+void av1_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type) {
+ (void)eob;
+ av1_iht8x4_32_add(input, dest, stride, tx_type);
+}
+
+void av1_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
(void)eob;
- vp10_iht4x8_32_add(input, dest, stride, tx_type);
+ av1_iht8x16_128_add(input, dest, stride, tx_type);
}
-void vp10_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
(void)eob;
- vp10_iht8x4_32_add(input, dest, stride, tx_type);
+ av1_iht16x8_128_add(input, dest, stride, tx_type);
}
-void vp10_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
(void)eob;
- vp10_iht8x16_128_add(input, dest, stride, tx_type);
+ av1_iht16x32_512_add(input, dest, stride, tx_type);
}
-void vp10_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
(void)eob;
- vp10_iht16x8_128_add(input, dest, stride, tx_type);
-}
-
-void vp10_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type) {
- (void)eob;
- vp10_iht16x32_512_add(input, dest, stride, tx_type);
-}
-
-void vp10_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type) {
- (void)eob;
- vp10_iht32x16_512_add(input, dest, stride, tx_type);
+ av1_iht32x16_512_add(input, dest, stride, tx_type);
}
#endif // CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_idct8x8_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct8x8_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+ case ADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
- case FLIPADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+ case FLIPADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
case V_DCT:
case H_DCT:
case V_ADST:
@@ -1197,7 +1197,7 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST only exists in C code
- vp10_iht8x8_64_add_c(input, dest, stride, tx_type);
+ av1_iht8x8_64_add_c(input, dest, stride, tx_type);
break;
case IDTX: inv_idtx_add_c(input, dest, stride, 8, tx_type); break;
#endif // CONFIG_EXT_TX
@@ -1205,20 +1205,20 @@
}
}
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_idct16x16_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct16x16_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_iht16x16_256_add(input, dest, stride, tx_type); break;
+ case ADST_ADST: av1_iht16x16_256_add(input, dest, stride, tx_type); break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_iht16x16_256_add(input, dest, stride, tx_type);
+ av1_iht16x16_256_add(input, dest, stride, tx_type);
break;
case V_DCT:
case H_DCT:
@@ -1227,7 +1227,7 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST only exists in C code
- vp10_iht16x16_256_add_c(input, dest, stride, tx_type);
+ av1_iht16x16_256_add_c(input, dest, stride, tx_type);
break;
case IDTX: inv_idtx_add_c(input, dest, stride, 16, tx_type); break;
#endif // CONFIG_EXT_TX
@@ -1235,10 +1235,10 @@
}
}
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_idct32x32_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct32x32_add(input, dest, stride, eob); break;
#if CONFIG_EXT_TX
case ADST_DCT:
case DCT_ADST:
@@ -1254,7 +1254,7 @@
case H_ADST:
case V_FLIPADST:
case H_FLIPADST:
- vp10_iht32x32_1024_add_c(input, dest, stride, tx_type);
+ av1_iht32x32_1024_add_c(input, dest, stride, tx_type);
break;
case IDTX: inv_idtx_add_c(input, dest, stride, 32, tx_type); break;
#endif // CONFIG_EXT_TX
@@ -1262,27 +1262,27 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_4[] = {
- { vpx_highbd_idct4_c, vpx_highbd_idct4_c }, // DCT_DCT
- { vpx_highbd_iadst4_c, vpx_highbd_idct4_c }, // ADST_DCT
- { vpx_highbd_idct4_c, vpx_highbd_iadst4_c }, // DCT_ADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }, // ADST_ADST
+ { aom_highbd_idct4_c, aom_highbd_idct4_c }, // DCT_DCT
+ { aom_highbd_iadst4_c, aom_highbd_idct4_c }, // ADST_DCT
+ { aom_highbd_idct4_c, aom_highbd_iadst4_c }, // DCT_ADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst4_c }, // ADST_ADST
#if CONFIG_EXT_TX
- { vpx_highbd_iadst4_c, vpx_highbd_idct4_c }, // FLIPADST_DCT
- { vpx_highbd_idct4_c, vpx_highbd_iadst4_c }, // DCT_FLIPADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }, // ADST_FLIPADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }, // FLIPADST_ADST
+ { aom_highbd_iadst4_c, aom_highbd_idct4_c }, // FLIPADST_DCT
+ { aom_highbd_idct4_c, aom_highbd_iadst4_c }, // DCT_FLIPADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst4_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst4_c }, // ADST_FLIPADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst4_c }, // FLIPADST_ADST
{ highbd_iidtx4_c, highbd_iidtx4_c }, // IDTX
- { vpx_highbd_idct4_c, highbd_iidtx4_c }, // V_DCT
- { highbd_iidtx4_c, vpx_highbd_idct4_c }, // H_DCT
- { vpx_highbd_iadst4_c, highbd_iidtx4_c }, // V_ADST
- { highbd_iidtx4_c, vpx_highbd_iadst4_c }, // H_ADST
- { vpx_highbd_iadst4_c, highbd_iidtx4_c }, // V_FLIPADST
- { highbd_iidtx4_c, vpx_highbd_iadst4_c }, // H_FLIPADST
+ { aom_highbd_idct4_c, highbd_iidtx4_c }, // V_DCT
+ { highbd_iidtx4_c, aom_highbd_idct4_c }, // H_DCT
+ { aom_highbd_iadst4_c, highbd_iidtx4_c }, // V_ADST
+ { highbd_iidtx4_c, aom_highbd_iadst4_c }, // H_ADST
+ { aom_highbd_iadst4_c, highbd_iidtx4_c }, // V_FLIPADST
+ { highbd_iidtx4_c, aom_highbd_iadst4_c }, // H_FLIPADST
#endif // CONFIG_EXT_TX
};
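
In each HIGH_IHT_* table the pair is {column transform, row transform}, following libvpx's transform_2d convention: e.g. the ADST_DCT row above applies the ADST vertically and the DCT horizontally, and in the rectangular tables that follow, the column transform's length matches the block height. A mirrored sketch of the convention (type names here are illustrative):

    typedef void (*hbd_txfm_1d)(const tran_low_t *in, tran_low_t *out, int bd);

    /* Mirrors the highbd_transform_2d pairing used by the tables above:
     * `cols` runs vertically (length = block height), `rows` horizontally
     * (length = block width). */
    typedef struct {
      hbd_txfm_1d cols; /* first initializer, e.g. aom_highbd_iadst4_c */
      hbd_txfm_1d rows; /* second initializer, e.g. aom_highbd_idct4_c */
    } HbdTxfm2d;
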
@@ -1330,25 +1330,25 @@
}
#if CONFIG_EXT_TX
-void vp10_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_4x8[] = {
- { vpx_highbd_idct8_c, vpx_highbd_idct4_c }, // DCT_DCT
- { vpx_highbd_iadst8_c, vpx_highbd_idct4_c }, // ADST_DCT
- { vpx_highbd_idct8_c, vpx_highbd_iadst4_c }, // DCT_ADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c }, // ADST_ADST
- { vpx_highbd_iadst8_c, vpx_highbd_idct4_c }, // FLIPADST_DCT
- { vpx_highbd_idct8_c, vpx_highbd_iadst4_c }, // DCT_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c }, // ADST_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c }, // FLIPADST_ADST
+ { aom_highbd_idct8_c, aom_highbd_idct4_c }, // DCT_DCT
+ { aom_highbd_iadst8_c, aom_highbd_idct4_c }, // ADST_DCT
+ { aom_highbd_idct8_c, aom_highbd_iadst4_c }, // DCT_ADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst4_c }, // ADST_ADST
+ { aom_highbd_iadst8_c, aom_highbd_idct4_c }, // FLIPADST_DCT
+ { aom_highbd_idct8_c, aom_highbd_iadst4_c }, // DCT_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst4_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst4_c }, // ADST_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst4_c }, // FLIPADST_ADST
{ highbd_iidtx8_c, highbd_iidtx4_c }, // IDTX
- { vpx_highbd_idct8_c, highbd_iidtx4_c }, // V_DCT
- { highbd_iidtx8_c, vpx_highbd_idct4_c }, // H_DCT
- { vpx_highbd_iadst8_c, highbd_iidtx4_c }, // V_ADST
- { highbd_iidtx8_c, vpx_highbd_iadst4_c }, // H_ADST
- { vpx_highbd_iadst8_c, highbd_iidtx4_c }, // V_FLIPADST
- { highbd_iidtx8_c, vpx_highbd_iadst4_c }, // H_FLIPADST
+ { aom_highbd_idct8_c, highbd_iidtx4_c }, // V_DCT
+ { highbd_iidtx8_c, aom_highbd_idct4_c }, // H_DCT
+ { aom_highbd_iadst8_c, highbd_iidtx4_c }, // V_ADST
+ { highbd_iidtx8_c, aom_highbd_iadst4_c }, // H_ADST
+ { aom_highbd_iadst8_c, highbd_iidtx4_c }, // V_FLIPADST
+ { highbd_iidtx8_c, aom_highbd_iadst4_c }, // H_FLIPADST
};
const int n = 4;
const int n2 = 8;
@@ -1388,25 +1388,25 @@
}
}
-void vp10_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_8x4[] = {
- { vpx_highbd_idct4_c, vpx_highbd_idct8_c }, // DCT_DCT
- { vpx_highbd_iadst4_c, vpx_highbd_idct8_c }, // ADST_DCT
- { vpx_highbd_idct4_c, vpx_highbd_iadst8_c }, // DCT_ADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c }, // ADST_ADST
- { vpx_highbd_iadst4_c, vpx_highbd_idct8_c }, // FLIPADST_DCT
- { vpx_highbd_idct4_c, vpx_highbd_iadst8_c }, // DCT_FLIPADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c }, // ADST_FLIPADST
- { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c }, // FLIPADST_ADST
+ { aom_highbd_idct4_c, aom_highbd_idct8_c }, // DCT_DCT
+ { aom_highbd_iadst4_c, aom_highbd_idct8_c }, // ADST_DCT
+ { aom_highbd_idct4_c, aom_highbd_iadst8_c }, // DCT_ADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst8_c }, // ADST_ADST
+ { aom_highbd_iadst4_c, aom_highbd_idct8_c }, // FLIPADST_DCT
+ { aom_highbd_idct4_c, aom_highbd_iadst8_c }, // DCT_FLIPADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst8_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst8_c }, // ADST_FLIPADST
+ { aom_highbd_iadst4_c, aom_highbd_iadst8_c }, // FLIPADST_ADST
{ highbd_iidtx4_c, highbd_iidtx8_c }, // IDTX
- { vpx_highbd_idct4_c, highbd_iidtx8_c }, // V_DCT
- { highbd_iidtx4_c, vpx_highbd_idct8_c }, // H_DCT
- { vpx_highbd_iadst4_c, highbd_iidtx8_c }, // V_ADST
- { highbd_iidtx4_c, vpx_highbd_iadst8_c }, // H_ADST
- { vpx_highbd_iadst4_c, highbd_iidtx8_c }, // V_FLIPADST
- { highbd_iidtx4_c, vpx_highbd_iadst8_c }, // H_FLIPADST
+ { aom_highbd_idct4_c, highbd_iidtx8_c }, // V_DCT
+ { highbd_iidtx4_c, aom_highbd_idct8_c }, // H_DCT
+ { aom_highbd_iadst4_c, highbd_iidtx8_c }, // V_ADST
+ { highbd_iidtx4_c, aom_highbd_iadst8_c }, // H_ADST
+ { aom_highbd_iadst4_c, highbd_iidtx8_c }, // V_FLIPADST
+ { highbd_iidtx4_c, aom_highbd_iadst8_c }, // H_FLIPADST
};
const int n = 4;
const int n2 = 8;
@@ -1446,25 +1446,25 @@
}
}
-void vp10_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_8x16[] = {
- { vpx_highbd_idct16_c, vpx_highbd_idct8_c }, // DCT_DCT
- { vpx_highbd_iadst16_c, vpx_highbd_idct8_c }, // ADST_DCT
- { vpx_highbd_idct16_c, vpx_highbd_iadst8_c }, // DCT_ADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c }, // ADST_ADST
- { vpx_highbd_iadst16_c, vpx_highbd_idct8_c }, // FLIPADST_DCT
- { vpx_highbd_idct16_c, vpx_highbd_iadst8_c }, // DCT_FLIPADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c }, // ADST_FLIPADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c }, // FLIPADST_ADST
+ { aom_highbd_idct16_c, aom_highbd_idct8_c }, // DCT_DCT
+ { aom_highbd_iadst16_c, aom_highbd_idct8_c }, // ADST_DCT
+ { aom_highbd_idct16_c, aom_highbd_iadst8_c }, // DCT_ADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst8_c }, // ADST_ADST
+ { aom_highbd_iadst16_c, aom_highbd_idct8_c }, // FLIPADST_DCT
+ { aom_highbd_idct16_c, aom_highbd_iadst8_c }, // DCT_FLIPADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst8_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst8_c }, // ADST_FLIPADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst8_c }, // FLIPADST_ADST
{ highbd_iidtx16_c, highbd_iidtx8_c }, // IDTX
- { vpx_highbd_idct16_c, highbd_iidtx8_c }, // V_DCT
- { highbd_iidtx16_c, vpx_highbd_idct8_c }, // H_DCT
- { vpx_highbd_iadst16_c, highbd_iidtx8_c }, // V_ADST
- { highbd_iidtx16_c, vpx_highbd_iadst8_c }, // H_ADST
- { vpx_highbd_iadst16_c, highbd_iidtx8_c }, // V_FLIPADST
- { highbd_iidtx16_c, vpx_highbd_iadst8_c }, // H_FLIPADST
+ { aom_highbd_idct16_c, highbd_iidtx8_c }, // V_DCT
+ { highbd_iidtx16_c, aom_highbd_idct8_c }, // H_DCT
+ { aom_highbd_iadst16_c, highbd_iidtx8_c }, // V_ADST
+ { highbd_iidtx16_c, aom_highbd_iadst8_c }, // H_ADST
+ { aom_highbd_iadst16_c, highbd_iidtx8_c }, // V_FLIPADST
+ { highbd_iidtx16_c, aom_highbd_iadst8_c }, // H_FLIPADST
};
const int n = 8;
const int n2 = 16;
@@ -1503,25 +1503,25 @@
}
}
-void vp10_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_16x8[] = {
- { vpx_highbd_idct8_c, vpx_highbd_idct16_c }, // DCT_DCT
- { vpx_highbd_iadst8_c, vpx_highbd_idct16_c }, // ADST_DCT
- { vpx_highbd_idct8_c, vpx_highbd_iadst16_c }, // DCT_ADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c }, // ADST_ADST
- { vpx_highbd_iadst8_c, vpx_highbd_idct16_c }, // FLIPADST_DCT
- { vpx_highbd_idct8_c, vpx_highbd_iadst16_c }, // DCT_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c }, // ADST_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c }, // FLIPADST_ADST
+ { aom_highbd_idct8_c, aom_highbd_idct16_c }, // DCT_DCT
+ { aom_highbd_iadst8_c, aom_highbd_idct16_c }, // ADST_DCT
+ { aom_highbd_idct8_c, aom_highbd_iadst16_c }, // DCT_ADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst16_c }, // ADST_ADST
+ { aom_highbd_iadst8_c, aom_highbd_idct16_c }, // FLIPADST_DCT
+ { aom_highbd_idct8_c, aom_highbd_iadst16_c }, // DCT_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst16_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst16_c }, // ADST_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst16_c }, // FLIPADST_ADST
{ highbd_iidtx8_c, highbd_iidtx16_c }, // IDTX
- { vpx_highbd_idct8_c, highbd_iidtx16_c }, // V_DCT
- { highbd_iidtx8_c, vpx_highbd_idct16_c }, // H_DCT
- { vpx_highbd_iadst8_c, highbd_iidtx16_c }, // V_ADST
- { highbd_iidtx8_c, vpx_highbd_iadst16_c }, // H_ADST
- { vpx_highbd_iadst8_c, highbd_iidtx16_c }, // V_FLIPADST
- { highbd_iidtx8_c, vpx_highbd_iadst16_c }, // H_FLIPADST
+ { aom_highbd_idct8_c, highbd_iidtx16_c }, // V_DCT
+ { highbd_iidtx8_c, aom_highbd_idct16_c }, // H_DCT
+ { aom_highbd_iadst8_c, highbd_iidtx16_c }, // V_ADST
+ { highbd_iidtx8_c, aom_highbd_iadst16_c }, // H_ADST
+ { aom_highbd_iadst8_c, highbd_iidtx16_c }, // V_FLIPADST
+ { highbd_iidtx8_c, aom_highbd_iadst16_c }, // H_FLIPADST
};
const int n = 8;
const int n2 = 16;
@@ -1560,25 +1560,25 @@
}
}
-void vp10_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_16x32[] = {
- { vpx_highbd_idct32_c, vpx_highbd_idct16_c }, // DCT_DCT
- { highbd_ihalfright32_c, vpx_highbd_idct16_c }, // ADST_DCT
- { vpx_highbd_idct32_c, vpx_highbd_iadst16_c }, // DCT_ADST
- { highbd_ihalfright32_c, vpx_highbd_iadst16_c }, // ADST_ADST
- { highbd_ihalfright32_c, vpx_highbd_idct16_c }, // FLIPADST_DCT
- { vpx_highbd_idct32_c, vpx_highbd_iadst16_c }, // DCT_FLIPADST
- { highbd_ihalfright32_c, vpx_highbd_iadst16_c }, // FLIPADST_FLIPADST
- { highbd_ihalfright32_c, vpx_highbd_iadst16_c }, // ADST_FLIPADST
- { highbd_ihalfright32_c, vpx_highbd_iadst16_c }, // FLIPADST_ADST
+ { aom_highbd_idct32_c, aom_highbd_idct16_c }, // DCT_DCT
+ { highbd_ihalfright32_c, aom_highbd_idct16_c }, // ADST_DCT
+ { aom_highbd_idct32_c, aom_highbd_iadst16_c }, // DCT_ADST
+ { highbd_ihalfright32_c, aom_highbd_iadst16_c }, // ADST_ADST
+ { highbd_ihalfright32_c, aom_highbd_idct16_c }, // FLIPADST_DCT
+ { aom_highbd_idct32_c, aom_highbd_iadst16_c }, // DCT_FLIPADST
+ { highbd_ihalfright32_c, aom_highbd_iadst16_c }, // FLIPADST_FLIPADST
+ { highbd_ihalfright32_c, aom_highbd_iadst16_c }, // ADST_FLIPADST
+ { highbd_ihalfright32_c, aom_highbd_iadst16_c }, // FLIPADST_ADST
{ highbd_iidtx32_c, highbd_iidtx16_c }, // IDTX
- { vpx_highbd_idct32_c, highbd_iidtx16_c }, // V_DCT
- { highbd_iidtx32_c, vpx_highbd_idct16_c }, // H_DCT
+ { aom_highbd_idct32_c, highbd_iidtx16_c }, // V_DCT
+ { highbd_iidtx32_c, aom_highbd_idct16_c }, // H_DCT
{ highbd_ihalfright32_c, highbd_iidtx16_c }, // V_ADST
- { highbd_iidtx32_c, vpx_highbd_iadst16_c }, // H_ADST
+ { highbd_iidtx32_c, aom_highbd_iadst16_c }, // H_ADST
{ highbd_ihalfright32_c, highbd_iidtx16_c }, // V_FLIPADST
- { highbd_iidtx32_c, vpx_highbd_iadst16_c }, // H_FLIPADST
+ { highbd_iidtx32_c, aom_highbd_iadst16_c }, // H_FLIPADST
};
const int n = 16;
const int n2 = 32;
@@ -1617,24 +1617,24 @@
}
}
-void vp10_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_32x16[] = {
- { vpx_highbd_idct16_c, vpx_highbd_idct32_c }, // DCT_DCT
- { vpx_highbd_iadst16_c, vpx_highbd_idct32_c }, // ADST_DCT
- { vpx_highbd_idct16_c, highbd_ihalfright32_c }, // DCT_ADST
- { vpx_highbd_iadst16_c, highbd_ihalfright32_c }, // ADST_ADST
- { vpx_highbd_iadst16_c, vpx_highbd_idct32_c }, // FLIPADST_DCT
- { vpx_highbd_idct16_c, highbd_ihalfright32_c }, // DCT_FLIPADST
- { vpx_highbd_iadst16_c, highbd_ihalfright32_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst16_c, highbd_ihalfright32_c }, // ADST_FLIPADST
- { vpx_highbd_iadst16_c, highbd_ihalfright32_c }, // FLIPADST_ADST
+ { aom_highbd_idct16_c, aom_highbd_idct32_c }, // DCT_DCT
+ { aom_highbd_iadst16_c, aom_highbd_idct32_c }, // ADST_DCT
+ { aom_highbd_idct16_c, highbd_ihalfright32_c }, // DCT_ADST
+ { aom_highbd_iadst16_c, highbd_ihalfright32_c }, // ADST_ADST
+ { aom_highbd_iadst16_c, aom_highbd_idct32_c }, // FLIPADST_DCT
+ { aom_highbd_idct16_c, highbd_ihalfright32_c }, // DCT_FLIPADST
+ { aom_highbd_iadst16_c, highbd_ihalfright32_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst16_c, highbd_ihalfright32_c }, // ADST_FLIPADST
+ { aom_highbd_iadst16_c, highbd_ihalfright32_c }, // FLIPADST_ADST
{ highbd_iidtx16_c, highbd_iidtx32_c }, // IDTX
- { vpx_highbd_idct16_c, highbd_iidtx32_c }, // V_DCT
- { highbd_iidtx16_c, vpx_highbd_idct32_c }, // H_DCT
- { vpx_highbd_iadst16_c, highbd_iidtx32_c }, // V_ADST
+ { aom_highbd_idct16_c, highbd_iidtx32_c }, // V_DCT
+ { highbd_iidtx16_c, aom_highbd_idct32_c }, // H_DCT
+ { aom_highbd_iadst16_c, highbd_iidtx32_c }, // V_ADST
{ highbd_iidtx16_c, highbd_ihalfright32_c }, // H_ADST
- { vpx_highbd_iadst16_c, highbd_iidtx32_c }, // V_FLIPADST
+ { aom_highbd_iadst16_c, highbd_iidtx32_c }, // V_FLIPADST
{ highbd_iidtx16_c, highbd_ihalfright32_c }, // H_FLIPADST
};
const int n = 16;
@@ -1675,26 +1675,26 @@
}
#endif // CONFIG_EXT_TX
-void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_8[] = {
- { vpx_highbd_idct8_c, vpx_highbd_idct8_c }, // DCT_DCT
- { vpx_highbd_iadst8_c, vpx_highbd_idct8_c }, // ADST_DCT
- { vpx_highbd_idct8_c, vpx_highbd_iadst8_c }, // DCT_ADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }, // ADST_ADST
+ { aom_highbd_idct8_c, aom_highbd_idct8_c }, // DCT_DCT
+ { aom_highbd_iadst8_c, aom_highbd_idct8_c }, // ADST_DCT
+ { aom_highbd_idct8_c, aom_highbd_iadst8_c }, // DCT_ADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst8_c }, // ADST_ADST
#if CONFIG_EXT_TX
- { vpx_highbd_iadst8_c, vpx_highbd_idct8_c }, // FLIPADST_DCT
- { vpx_highbd_idct8_c, vpx_highbd_iadst8_c }, // DCT_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }, // ADST_FLIPADST
- { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }, // FLIPADST_ADST
+ { aom_highbd_iadst8_c, aom_highbd_idct8_c }, // FLIPADST_DCT
+ { aom_highbd_idct8_c, aom_highbd_iadst8_c }, // DCT_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst8_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst8_c }, // ADST_FLIPADST
+ { aom_highbd_iadst8_c, aom_highbd_iadst8_c }, // FLIPADST_ADST
{ highbd_iidtx8_c, highbd_iidtx8_c }, // IDTX
- { vpx_highbd_idct8_c, highbd_iidtx8_c }, // V_DCT
- { highbd_iidtx8_c, vpx_highbd_idct8_c }, // H_DCT
- { vpx_highbd_iadst8_c, highbd_iidtx8_c }, // V_ADST
- { highbd_iidtx8_c, vpx_highbd_iadst8_c }, // H_ADST
- { vpx_highbd_iadst8_c, highbd_iidtx8_c }, // V_FLIPADST
- { highbd_iidtx8_c, vpx_highbd_iadst8_c }, // H_FLIPADST
+ { aom_highbd_idct8_c, highbd_iidtx8_c }, // V_DCT
+ { highbd_iidtx8_c, aom_highbd_idct8_c }, // H_DCT
+ { aom_highbd_iadst8_c, highbd_iidtx8_c }, // V_ADST
+ { highbd_iidtx8_c, aom_highbd_iadst8_c }, // H_ADST
+ { aom_highbd_iadst8_c, highbd_iidtx8_c }, // V_FLIPADST
+ { highbd_iidtx8_c, aom_highbd_iadst8_c }, // H_FLIPADST
#endif // CONFIG_EXT_TX
};
@@ -1741,26 +1741,26 @@
}
}
-void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_16[] = {
- { vpx_highbd_idct16_c, vpx_highbd_idct16_c }, // DCT_DCT
- { vpx_highbd_iadst16_c, vpx_highbd_idct16_c }, // ADST_DCT
- { vpx_highbd_idct16_c, vpx_highbd_iadst16_c }, // DCT_ADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }, // ADST_ADST
+ { aom_highbd_idct16_c, aom_highbd_idct16_c }, // DCT_DCT
+ { aom_highbd_iadst16_c, aom_highbd_idct16_c }, // ADST_DCT
+ { aom_highbd_idct16_c, aom_highbd_iadst16_c }, // DCT_ADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst16_c }, // ADST_ADST
#if CONFIG_EXT_TX
- { vpx_highbd_iadst16_c, vpx_highbd_idct16_c }, // FLIPADST_DCT
- { vpx_highbd_idct16_c, vpx_highbd_iadst16_c }, // DCT_FLIPADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }, // FLIPADST_FLIPADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }, // ADST_FLIPADST
- { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }, // FLIPADST_ADST
+ { aom_highbd_iadst16_c, aom_highbd_idct16_c }, // FLIPADST_DCT
+ { aom_highbd_idct16_c, aom_highbd_iadst16_c }, // DCT_FLIPADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst16_c }, // FLIPADST_FLIPADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst16_c }, // ADST_FLIPADST
+ { aom_highbd_iadst16_c, aom_highbd_iadst16_c }, // FLIPADST_ADST
{ highbd_iidtx16_c, highbd_iidtx16_c }, // IDTX
- { vpx_highbd_idct16_c, highbd_iidtx16_c }, // V_DCT
- { highbd_iidtx16_c, vpx_highbd_idct16_c }, // H_DCT
- { vpx_highbd_iadst16_c, highbd_iidtx16_c }, // V_ADST
- { highbd_iidtx16_c, vpx_highbd_iadst16_c }, // H_ADST
- { vpx_highbd_iadst16_c, highbd_iidtx16_c }, // V_FLIPADST
- { highbd_iidtx16_c, vpx_highbd_iadst16_c }, // H_FLIPADST
+ { aom_highbd_idct16_c, highbd_iidtx16_c }, // V_DCT
+ { highbd_iidtx16_c, aom_highbd_idct16_c }, // H_DCT
+ { aom_highbd_iadst16_c, highbd_iidtx16_c }, // V_ADST
+ { highbd_iidtx16_c, aom_highbd_iadst16_c }, // H_ADST
+ { aom_highbd_iadst16_c, highbd_iidtx16_c }, // V_FLIPADST
+ { highbd_iidtx16_c, aom_highbd_iadst16_c }, // H_FLIPADST
#endif // CONFIG_EXT_TX
};
@@ -1808,21 +1808,21 @@
}
#if CONFIG_EXT_TX
-void vp10_highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int tx_type, int bd) {
+void av1_highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+ int stride, int tx_type, int bd) {
static const highbd_transform_2d HIGH_IHT_32[] = {
- { vpx_highbd_idct32_c, vpx_highbd_idct32_c }, // DCT_DCT
- { highbd_ihalfright32_c, vpx_highbd_idct32_c }, // ADST_DCT
- { vpx_highbd_idct32_c, highbd_ihalfright32_c }, // DCT_ADST
+ { aom_highbd_idct32_c, aom_highbd_idct32_c }, // DCT_DCT
+ { highbd_ihalfright32_c, aom_highbd_idct32_c }, // ADST_DCT
+ { aom_highbd_idct32_c, highbd_ihalfright32_c }, // DCT_ADST
{ highbd_ihalfright32_c, highbd_ihalfright32_c }, // ADST_ADST
- { highbd_ihalfright32_c, vpx_highbd_idct32_c }, // FLIPADST_DCT
- { vpx_highbd_idct32_c, highbd_ihalfright32_c }, // DCT_FLIPADST
+ { highbd_ihalfright32_c, aom_highbd_idct32_c }, // FLIPADST_DCT
+ { aom_highbd_idct32_c, highbd_ihalfright32_c }, // DCT_FLIPADST
{ highbd_ihalfright32_c, highbd_ihalfright32_c }, // FLIPADST_FLIPADST
{ highbd_ihalfright32_c, highbd_ihalfright32_c }, // ADST_FLIPADST
{ highbd_ihalfright32_c, highbd_ihalfright32_c }, // FLIPADST_ADST
{ highbd_iidtx32_c, highbd_iidtx32_c }, // IDTX
- { vpx_highbd_idct32_c, highbd_iidtx32_c }, // V_DCT
- { highbd_iidtx32_c, vpx_highbd_idct32_c }, // H_DCT
+ { aom_highbd_idct32_c, highbd_iidtx32_c }, // V_DCT
+ { highbd_iidtx32_c, aom_highbd_idct32_c }, // H_DCT
{ highbd_ihalfright32_c, highbd_iidtx32_c }, // V_ADST
{ highbd_iidtx32_c, highbd_ihalfright32_c }, // H_ADST
{ highbd_ihalfright32_c, highbd_iidtx32_c }, // V_FLIPADST
@@ -1872,73 +1872,73 @@
#endif // CONFIG_EXT_TX
// idct
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, int bd) {
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, int bd) {
if (eob > 1)
- vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
+ aom_highbd_idct4x4_16_add(input, dest, stride, bd);
else
- vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
+ aom_highbd_idct4x4_1_add(input, dest, stride, bd);
}
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, int bd) {
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, int bd) {
if (eob > 1)
- vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
+ aom_highbd_iwht4x4_16_add(input, dest, stride, bd);
else
- vpx_highbd_iwht4x4_1_add(input, dest, stride, bd);
+ aom_highbd_iwht4x4_1_add(input, dest, stride, bd);
}
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, int bd) {
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, int bd) {
// If only the DC coefficient is present, input[0] already holds the
// reconstructed value and needs no dequantization. The DC coefficient is
// counted in eob, so in that case eob >= 1.
// The calculation can be simplified if there are not many non-zero dct
// coefficients. Use eobs to decide what to do.
- // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+ // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
// Combine that with code here.
// DC only DCT coefficient
if (eob == 1) {
- vpx_highbd_idct8x8_1_add(input, dest, stride, bd);
+ aom_highbd_idct8x8_1_add(input, dest, stride, bd);
} else if (eob <= 10) {
- vpx_highbd_idct8x8_10_add(input, dest, stride, bd);
+ aom_highbd_idct8x8_10_add(input, dest, stride, bd);
} else {
- vpx_highbd_idct8x8_64_add(input, dest, stride, bd);
+ aom_highbd_idct8x8_64_add(input, dest, stride, bd);
}
}
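/*
 * Illustrative sketch of the eob-driven dispatch above: a decoder-side
 * caller only needs the coefficient count to pick the cheapest inverse
 * transform. av1_highbd_idct8x8_add() is the function defined above; the
 * wrapper name and the eob == 0 early-out are assumptions for illustration.
 */
static void reconstruct_highbd_8x8(const tran_low_t *dqcoeff, uint8_t *dst,
                                   int stride, int eob, int bd) {
  if (eob == 0) return;  // all-zero block: nothing to add to the predictor
  // eob == 1 takes the DC-only path, eob <= 10 the 10-coefficient path,
  // anything larger the full 64-coefficient inverse DCT.
  av1_highbd_idct8x8_add(dqcoeff, dst, stride, eob, bd);
}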
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd) {
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd) {
// The calculation can be simplified if there are not many non-zero dct
// coefficients. Use eobs to separate different cases.
// DC only DCT coefficient.
if (eob == 1) {
- vpx_highbd_idct16x16_1_add(input, dest, stride, bd);
+ aom_highbd_idct16x16_1_add(input, dest, stride, bd);
} else if (eob <= 10) {
- vpx_highbd_idct16x16_10_add(input, dest, stride, bd);
+ aom_highbd_idct16x16_10_add(input, dest, stride, bd);
} else {
- vpx_highbd_idct16x16_256_add(input, dest, stride, bd);
+ aom_highbd_idct16x16_256_add(input, dest, stride, bd);
}
}
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd) {
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd) {
// Non-zero coeff only in upper-left 8x8
if (eob == 1) {
- vpx_highbd_idct32x32_1_add(input, dest, stride, bd);
+ aom_highbd_idct32x32_1_add(input, dest, stride, bd);
} else if (eob <= 34) {
- vpx_highbd_idct32x32_34_add(input, dest, stride, bd);
+ aom_highbd_idct32x32_34_add(input, dest, stride, bd);
} else {
- vpx_highbd_idct32x32_1024_add(input, dest, stride, bd);
+ aom_highbd_idct32x32_1024_add(input, dest, stride, bd);
}
}
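/*
 * Note on the thresholds above: the reduced aom_dsp kernel assumes non-zero
 * coefficients only in the upper-left 8x8 of the 32x32 block, and the first
 * coefficients of the default scan fall in that region, so eob <= 34 is
 * enough to guarantee the assumption holds.
 */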
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd, TX_TYPE tx_type,
- int lossless) {
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type,
+ int lossless) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_highbd_iwht4x4_add(input, dest, stride, eob, bd);
+ av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);
return;
}
@@ -1947,8 +1947,8 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
- bd);
+ av1_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+ bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -1956,8 +1956,8 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
- bd);
+ av1_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+ bd);
break;
case V_DCT:
case H_DCT:
@@ -1966,7 +1966,7 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST only exists in C code
- vp10_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
break;
case IDTX:
highbd_inv_idtx_add_c(input, dest, stride, 4, tx_type, bd);
@@ -1977,60 +1977,57 @@
}
#if CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type) {
+ (void)eob;
+ av1_highbd_iht4x8_32_add_c(input, dest, stride, tx_type, bd);
+}
+
+void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type) {
+ (void)eob;
+ av1_highbd_iht8x4_32_add_c(input, dest, stride, tx_type, bd);
+}
+
+void av1_highbd_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
(void)eob;
- vp10_highbd_iht4x8_32_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht8x16_128_add_c(input, dest, stride, tx_type, bd);
}
-void vp10_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
(void)eob;
- vp10_highbd_iht8x4_32_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht16x8_128_add_c(input, dest, stride, tx_type, bd);
}
-void vp10_highbd_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
(void)eob;
- vp10_highbd_iht8x16_128_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht16x32_512_add_c(input, dest, stride, tx_type, bd);
}
-void vp10_highbd_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
(void)eob;
- vp10_highbd_iht16x8_128_add_c(input, dest, stride, tx_type, bd);
-}
-
-void vp10_highbd_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type) {
- (void)eob;
- vp10_highbd_iht16x32_512_add_c(input, dest, stride, tx_type, bd);
-}
-
-void vp10_highbd_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type) {
- (void)eob;
- vp10_highbd_iht32x16_512_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht32x16_512_add_c(input, dest, stride, tx_type, bd);
}
#endif // CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type) {
(void)eob;
switch (tx_type) {
case DCT_DCT:
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
- bd);
+ av1_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+ bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -2038,8 +2035,8 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
- bd);
+ av1_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+ bd);
break;
case V_DCT:
case H_DCT:
@@ -2048,7 +2045,7 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST only exists in C code
- vp10_highbd_iht8x8_64_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht8x8_64_add_c(input, dest, stride, tx_type, bd);
break;
case IDTX:
highbd_inv_idtx_add_c(input, dest, stride, 8, tx_type, bd);
@@ -2058,17 +2055,17 @@
}
}
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd,
+ TX_TYPE tx_type) {
(void)eob;
switch (tx_type) {
case DCT_DCT:
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
- tx_type, bd);
+ av1_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
+ tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -2076,8 +2073,8 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
- tx_type, bd);
+ av1_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
+ tx_type, bd);
break;
case V_DCT:
case H_DCT:
@@ -2086,7 +2083,7 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST only exists in C code
- vp10_highbd_iht16x16_256_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht16x16_256_add_c(input, dest, stride, tx_type, bd);
break;
case IDTX:
highbd_inv_idtx_add_c(input, dest, stride, 16, tx_type, bd);
@@ -2096,14 +2093,14 @@
}
}
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd,
+ TX_TYPE tx_type) {
(void)eob;
switch (tx_type) {
case DCT_DCT:
- vp10_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
- DCT_DCT, bd);
+ av1_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
+ DCT_DCT, bd);
break;
#if CONFIG_EXT_TX
case ADST_DCT:
@@ -2120,7 +2117,7 @@
case H_ADST:
case V_FLIPADST:
case H_FLIPADST:
- vp10_highbd_iht32x32_1024_add_c(input, dest, stride, tx_type, bd);
+ av1_highbd_iht32x32_1024_add_c(input, dest, stride, tx_type, bd);
break;
case IDTX:
highbd_inv_idtx_add_c(input, dest, stride, 32, tx_type, bd);
@@ -2129,7 +2126,7 @@
default: assert(0); break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
void inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
INV_TXFM_PARAM *inv_txfm_param) {
@@ -2140,45 +2137,39 @@
switch (tx_size) {
case TX_32X32:
- vp10_inv_txfm_add_32x32(input, dest, stride, eob, tx_type);
+ av1_inv_txfm_add_32x32(input, dest, stride, eob, tx_type);
break;
case TX_16X16:
- vp10_inv_txfm_add_16x16(input, dest, stride, eob, tx_type);
+ av1_inv_txfm_add_16x16(input, dest, stride, eob, tx_type);
break;
- case TX_8X8:
- vp10_inv_txfm_add_8x8(input, dest, stride, eob, tx_type);
- break;
+ case TX_8X8: av1_inv_txfm_add_8x8(input, dest, stride, eob, tx_type); break;
#if CONFIG_EXT_TX
- case TX_4X8:
- vp10_inv_txfm_add_4x8(input, dest, stride, eob, tx_type);
- break;
- case TX_8X4:
- vp10_inv_txfm_add_8x4(input, dest, stride, eob, tx_type);
- break;
+ case TX_4X8: av1_inv_txfm_add_4x8(input, dest, stride, eob, tx_type); break;
+ case TX_8X4: av1_inv_txfm_add_8x4(input, dest, stride, eob, tx_type); break;
case TX_8X16:
- vp10_inv_txfm_add_8x16(input, dest, stride, eob, tx_type);
+ av1_inv_txfm_add_8x16(input, dest, stride, eob, tx_type);
break;
case TX_16X8:
- vp10_inv_txfm_add_16x8(input, dest, stride, eob, tx_type);
+ av1_inv_txfm_add_16x8(input, dest, stride, eob, tx_type);
break;
case TX_16X32:
- vp10_inv_txfm_add_16x32(input, dest, stride, eob, tx_type);
+ av1_inv_txfm_add_16x32(input, dest, stride, eob, tx_type);
break;
case TX_32X16:
- vp10_inv_txfm_add_32x16(input, dest, stride, eob, tx_type);
+ av1_inv_txfm_add_32x16(input, dest, stride, eob, tx_type);
break;
#endif // CONFIG_EXT_TX
case TX_4X4:
- // this is like vp10_short_idct4x4 but has a special case around eob<=1
+ // this is like av1_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp10_inv_txfm_add_4x4(input, dest, stride, eob, tx_type, lossless);
+ av1_inv_txfm_add_4x4(input, dest, stride, eob, tx_type, lossless);
break;
default: assert(0 && "Invalid transform size"); break;
}
}
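/*
 * Minimal usage sketch for inv_txfm_add() above. INV_TXFM_PARAM carries the
 * tx_type/tx_size/eob/lossless fields declared in av1/common/idct.h; the
 * wrapper name and the dqcoeff/dst buffers are hypothetical.
 */
static void add_residual_sketch(const tran_low_t *dqcoeff, uint8_t *dst,
                                int stride, int eob, int lossless) {
  INV_TXFM_PARAM param;
  param.tx_type = DCT_DCT;   // lossless blocks must use DCT_DCT here
  param.tx_size = TX_4X4;
  param.eob = eob;           // produced by the coefficient decode stage
  param.lossless = lossless; // routes TX_4X4 through the exact WHT path
  inv_txfm_add(dqcoeff, dst, stride, &param);
}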
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
INV_TXFM_PARAM *inv_txfm_param) {
const TX_TYPE tx_type = inv_txfm_param->tx_type;
@@ -2189,42 +2180,42 @@
switch (tx_size) {
case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_32x32(input, dest, stride, eob, bd, tx_type);
break;
case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_16x16(input, dest, stride, eob, bd, tx_type);
break;
case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_8x8(input, dest, stride, eob, bd, tx_type);
break;
#if CONFIG_EXT_TX
case TX_4X8:
- vp10_highbd_inv_txfm_add_4x8(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_4x8(input, dest, stride, eob, bd, tx_type);
break;
case TX_8X4:
- vp10_highbd_inv_txfm_add_8x4(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_8x4(input, dest, stride, eob, bd, tx_type);
break;
case TX_8X16:
- vp10_highbd_inv_txfm_add_8x16(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_8x16(input, dest, stride, eob, bd, tx_type);
break;
case TX_16X8:
- vp10_highbd_inv_txfm_add_16x8(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_16x8(input, dest, stride, eob, bd, tx_type);
break;
case TX_16X32:
- vp10_highbd_inv_txfm_add_16x32(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_16x32(input, dest, stride, eob, bd, tx_type);
break;
case TX_32X16:
- vp10_highbd_inv_txfm_add_32x16(input, dest, stride, eob, bd, tx_type);
+ av1_highbd_inv_txfm_add_32x16(input, dest, stride, eob, bd, tx_type);
break;
#endif // CONFIG_EXT_TX
case TX_4X4:
- // this is like vp10_short_idct4x4 but has a special case around eob<=1
+ // this is like av1_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp10_highbd_inv_txfm_add_4x4(input, dest, stride, eob, bd, tx_type,
- lossless);
+ av1_highbd_inv_txfm_add_4x4(input, dest, stride, eob, bd, tx_type,
+ lossless);
break;
default: assert(0 && "Invalid transform size"); break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/idct.h b/av1/common/idct.h
index 9b3be62..58ee0c7 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_IDCT_H_
-#define VP10_COMMON_IDCT_H_
+#ifndef AV1_COMMON_IDCT_H_
+#define AV1_COMMON_IDCT_H_
#include <assert.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/blockd.h"
#include "av1/common/common.h"
#include "av1/common/enums.h"
@@ -30,7 +30,7 @@
TX_SIZE tx_size;
int eob;
int lossless;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int bd;
#endif
} INV_TXFM_PARAM;
@@ -41,78 +41,78 @@
transform_1d cols, rows; // vertical and horizontal
} transform_2d;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
typedef struct {
highbd_transform_1d cols, rows; // vertical and horizontal
} highbd_transform_2d;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
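/*
 * How a (highbd_)transform_2d entry is applied, in outline: tx_type selects
 * one row and one column 1-D kernel from the tables in av1/common/idct.c;
 * the row kernel runs over each input row, then the column kernel runs over
 * the intermediate result before it is rounded and added to the predictor.
 * A condensed sketch for the 8x8 case (column_of() and add_rounded() stand
 * in for the real gather and round-and-add steps):
 *
 *   const highbd_transform_2d ht = HIGH_IHT_8[tx_type];
 *   for (i = 0; i < 8; ++i) ht.rows(input + 8 * i, out + 8 * i, bd);
 *   for (j = 0; j < 8; ++j) {
 *     ht.cols(column_of(out, j), temp_out, bd);
 *     add_rounded(temp_out, dest, j, stride, bd);
 *   }
 */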
#define MAX_TX_SCALE 1
int get_tx_scale(const MACROBLOCKD *const xd, const TX_TYPE tx_type,
const TX_SIZE tx_size);
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob);
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob);
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob);
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob);
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob);
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob);
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob);
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob);
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob);
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob);
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type, int lossless);
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type, int lossless);
#if CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type);
#endif // CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, TX_TYPE tx_type);
void inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
INV_TXFM_PARAM *inv_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, int bd);
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, int bd);
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
- int eob, int bd);
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd);
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd);
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd, TX_TYPE tx_type,
- int lossless);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, int bd);
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, int bd);
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+ int eob, int bd);
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd);
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd);
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type,
+ int lossless);
#if CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type);
#endif // CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
- int stride, int eob, int bd,
- TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd,
+ TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+ int stride, int eob, int bd,
+ TX_TYPE tx_type);
void highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
INV_TXFM_PARAM *inv_txfm_param);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_IDCT_H_
+#endif // AV1_COMMON_IDCT_H_
diff --git a/av1/common/intra_filters.h b/av1/common/intra_filters.h
index 021fb8e..350f7ca 100644
--- a/av1/common/intra_filters.h
+++ b/av1/common/intra_filters.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_INTRA_FILTERS_H_
-#define VP10_COMMON_INTRA_FILTERS_H_
+#ifndef AV1_COMMON_INTRA_FILTERS_H_
+#define AV1_COMMON_INTRA_FILTERS_H_
#define FILTER_INTRA_PREC_BITS (10)
@@ -64,4 +64,4 @@
},
};
-#endif // VP10_COMMON_INTRA_FILTERS_H_
+#endif // AV1_COMMON_INTRA_FILTERS_H_
diff --git a/av1/common/loopfilter.c b/av1/common/loopfilter.c
index e4636a5..906223f 100644
--- a/av1/common/loopfilter.c
+++ b/av1/common/loopfilter.c
@@ -10,14 +10,14 @@
#include <math.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/loopfilter.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/reconinter.h"
#include "av1/common/restoration.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/seg_common.h"
@@ -241,7 +241,7 @@
static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
const MB_MODE_INFO *mbmi) {
#if CONFIG_SUPERTX
- const int segment_id = VPXMIN(mbmi->segment_id, mbmi->segment_id_supertx);
+ const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx);
assert(
IMPLIES(supertx_enabled(mbmi), mbmi->segment_id_supertx != MAX_SEGMENTS));
assert(IMPLIES(supertx_enabled(mbmi),
@@ -252,7 +252,7 @@
return lfi_n->lvl[segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
}
-void vp10_loop_filter_init(VP10_COMMON *cm) {
+void av1_loop_filter_init(AV1_COMMON *cm) {
loop_filter_info_n *lfi = &cm->lf_info;
struct loopfilter *lf = &cm->lf;
int lvl;
@@ -266,7 +266,7 @@
memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
}
-void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
+void av1_loop_filter_frame_init(AV1_COMMON *cm, int default_filt_lvl) {
int seg_id;
// n_shift is the multiplier for lf_deltas
// the multiplier is 1 for when filter_lvl is between 0 and 31;
@@ -341,52 +341,52 @@
if (mask & 1) {
if ((mask_16x16_0 | mask_16x16_1) & 1) {
if ((mask_16x16_0 & mask_16x16_1) & 1) {
- vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr);
} else if (mask_16x16_0 & 1) {
- vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+ aom_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
} else {
- vpx_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+ aom_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
}
}
if ((mask_8x8_0 | mask_8x8_1) & 1) {
if ((mask_8x8_0 & mask_8x8_1) & 1) {
- vpx_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
} else if (mask_8x8_0 & 1) {
- vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+ aom_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
} else {
- vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+ aom_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
}
}
if ((mask_4x4_0 | mask_4x4_1) & 1) {
if ((mask_4x4_0 & mask_4x4_1) & 1) {
- vpx_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
} else if (mask_4x4_0 & 1) {
- vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+ aom_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
} else {
- vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+ aom_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
}
}
if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
- vpx_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+ aom_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
} else if (mask_4x4_int_0 & 1) {
- vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+ aom_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr);
} else {
- vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
+ aom_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
}
}
@@ -405,7 +405,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_filter_selectively_vert_row2(
int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
unsigned int mask_8x8_l, unsigned int mask_4x4_l,
@@ -434,55 +434,55 @@
if (mask & 1) {
if ((mask_16x16_0 | mask_16x16_1) & 1) {
if ((mask_16x16_0 & mask_16x16_1) & 1) {
- vpx_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, bd);
} else if (mask_16x16_0 & 1) {
- vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, bd);
} else {
- vpx_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
+ aom_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
lfi1->lim, lfi1->hev_thr, bd);
}
}
if ((mask_8x8_0 | mask_8x8_1) & 1) {
if ((mask_8x8_0 & mask_8x8_1) & 1) {
- vpx_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr, bd);
} else if (mask_8x8_0 & 1) {
- vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, bd);
} else {
- vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
+ aom_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
lfi1->lim, lfi1->hev_thr, bd);
}
}
if ((mask_4x4_0 | mask_4x4_1) & 1) {
if ((mask_4x4_0 & mask_4x4_1) & 1) {
- vpx_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr, bd);
} else if (mask_4x4_0 & 1) {
- vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, bd);
} else {
- vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
+ aom_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
lfi1->lim, lfi1->hev_thr, bd);
}
}
if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
- vpx_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr, bd);
} else if (mask_4x4_int_0 & 1) {
- vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+ aom_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
lfi0->hev_thr, bd);
} else {
- vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
+ aom_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
lfi1->lim, lfi1->hev_thr, bd);
}
}
@@ -500,7 +500,7 @@
mask_4x4_int_1 >>= 1;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
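/*
 * The mask walk shared by the filter_selectively_* helpers in this file:
 * each bit of a mask selects one 8-pixel edge, the strongest applicable
 * filter wins, and the masks shift in lockstep with the pixel pointer.
 * Condensed sketch (the apply_* names are placeholders for the aom_lpf_*
 * kernels):
 *
 *   for (i = 0; i < count; ++i, s += 8) {
 *     if (mask_16x16 & 1)    apply_16_edge(s);
 *     else if (mask_8x8 & 1) apply_8_edge(s);
 *     else if (mask_4x4 & 1) apply_4_edge(s);
 *     mask_16x16 >>= 1; mask_8x8 >>= 1; mask_4x4 >>= 1;
 *   }
 */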
static void filter_selectively_horiz(
uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
@@ -517,11 +517,11 @@
if (mask & 1) {
if (mask_16x16 & 1) {
if ((mask_16x16 & 3) == 3) {
- vpx_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
count = 2;
} else {
- vpx_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
}
} else if (mask_8x8 & 1) {
@@ -529,28 +529,28 @@
// Next block's thresholds.
const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
- vpx_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, lfin->mblim, lfin->lim,
lfin->hev_thr);
if ((mask_4x4_int & 3) == 3) {
- vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+ aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
lfi->lim, lfi->hev_thr, lfin->mblim,
lfin->lim, lfin->hev_thr);
} else {
if (mask_4x4_int & 1)
- vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
else if (mask_4x4_int & 2)
- vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+ aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
lfin->lim, lfin->hev_thr);
}
count = 2;
} else {
- vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+ aom_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
if (mask_4x4_int & 1)
- vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
}
} else if (mask_4x4 & 1) {
@@ -558,31 +558,31 @@
// Next block's thresholds.
const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
- vpx_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, lfin->mblim, lfin->lim,
lfin->hev_thr);
if ((mask_4x4_int & 3) == 3) {
- vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+ aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
lfi->lim, lfi->hev_thr, lfin->mblim,
lfin->lim, lfin->hev_thr);
} else {
if (mask_4x4_int & 1)
- vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
else if (mask_4x4_int & 2)
- vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+ aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
lfin->lim, lfin->hev_thr);
}
count = 2;
} else {
- vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+ aom_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
if (mask_4x4_int & 1)
- vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
}
} else if (mask_4x4_int & 1) {
- vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr);
}
}
@@ -595,7 +595,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_filter_selectively_horiz(
uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -611,11 +611,11 @@
if (mask & 1) {
if (mask_16x16 & 1) {
if ((mask_16x16 & 3) == 3) {
- vpx_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, bd);
count = 2;
} else {
- vpx_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, bd);
}
} else if (mask_8x8 & 1) {
@@ -623,30 +623,30 @@
// Next block's thresholds.
const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
- vpx_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, lfin->mblim, lfin->lim,
lfin->hev_thr, bd);
if ((mask_4x4_int & 3) == 3) {
- vpx_highbd_lpf_horizontal_4_dual(
+ aom_highbd_lpf_horizontal_4_dual(
s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
lfin->mblim, lfin->lim, lfin->hev_thr, bd);
} else {
if (mask_4x4_int & 1) {
- vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
lfi->lim, lfi->hev_thr, bd);
} else if (mask_4x4_int & 2) {
- vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+ aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
lfin->lim, lfin->hev_thr, bd);
}
}
count = 2;
} else {
- vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, bd);
if (mask_4x4_int & 1) {
- vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
lfi->lim, lfi->hev_thr, bd);
}
}
@@ -655,34 +655,34 @@
// Next block's thresholds.
const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
- vpx_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, lfin->mblim, lfin->lim,
lfin->hev_thr, bd);
if ((mask_4x4_int & 3) == 3) {
- vpx_highbd_lpf_horizontal_4_dual(
+ aom_highbd_lpf_horizontal_4_dual(
s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
lfin->mblim, lfin->lim, lfin->hev_thr, bd);
} else {
if (mask_4x4_int & 1) {
- vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
lfi->lim, lfi->hev_thr, bd);
} else if (mask_4x4_int & 2) {
- vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+ aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
lfin->lim, lfin->hev_thr, bd);
}
}
count = 2;
} else {
- vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, bd);
if (mask_4x4_int & 1) {
- vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
lfi->lim, lfi->hev_thr, bd);
}
}
} else if (mask_4x4_int & 1) {
- vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, bd);
}
}
@@ -694,7 +694,7 @@
mask_4x4_int >>= count;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// This function ORs into the current lfm structure which loop filters to
// run for the specific mi we are looking at. It uses information
@@ -833,9 +833,9 @@
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
-void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
- MODE_INFO **mi, const int mode_info_stride,
- LOOP_FILTER_MASK *lfm) {
+void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
+ MODE_INFO **mi, const int mode_info_stride,
+ LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
MODE_INFO **mip = mi;
@@ -861,13 +861,13 @@
const int shift_32_uv[] = { 0, 2, 8, 10 };
const int shift_16_uv[] = { 0, 1, 4, 5 };
int i;
- const int max_rows = VPXMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
- const int max_cols = VPXMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
+ const int max_rows = AOMMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
+ const int max_cols = AOMMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
#if CONFIG_EXT_PARTITION
assert(0 && "Not yet updated");
#endif // CONFIG_EXT_PARTITION
- vp10_zero(*lfm);
+ av1_zero(*lfm);
assert(mip[0] != NULL);
// TODO(jimbankoski): Try moving most of the following code into decode
@@ -1123,15 +1123,15 @@
if (mask & 1) {
if (mask_16x16 & 1) {
- vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+ aom_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
} else if (mask_8x8 & 1) {
- vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+ aom_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
} else if (mask_4x4 & 1) {
- vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+ aom_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
}
}
if (mask_4x4_int & 1)
- vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+ aom_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
s += 8;
lfl += 1;
mask_16x16 >>= 1;
@@ -1141,7 +1141,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_filter_selectively_vert(
uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -1154,18 +1154,18 @@
if (mask & 1) {
if (mask_16x16 & 1) {
- vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+ aom_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
bd);
} else if (mask_8x8 & 1) {
- vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+ aom_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
bd);
} else if (mask_4x4 & 1) {
- vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+ aom_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
bd);
}
}
if (mask_4x4_int & 1)
- vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
+ aom_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
lfi->hev_thr, bd);
s += 8;
lfl += 1;
@@ -1175,11 +1175,11 @@
mask_4x4_int >>= 1;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_filter_block_plane_non420(VP10_COMMON *cm,
- struct macroblockd_plane *plane,
- MODE_INFO **mib, int mi_row, int mi_col) {
+void av1_filter_block_plane_non420(AV1_COMMON *cm,
+ struct macroblockd_plane *plane,
+ MODE_INFO **mib, int mi_row, int mi_col) {
const int ss_x = plane->subsampling_x;
const int ss_y = plane->subsampling_y;
const int row_step = 1 << ss_y;
@@ -1254,17 +1254,17 @@
#if CONFIG_EXT_TX && CONFIG_RECT_TX
tx_size_r =
- VPXMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
- tx_size_c = VPXMIN(txsize_vert_map[tx_size],
+ AOMMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
+ tx_size_c = AOMMIN(txsize_vert_map[tx_size],
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
cm->above_txfm_context[mi_col + c] = txsize_horz_map[tx_size];
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] =
txsize_vert_map[tx_size];
#else
- tx_size_r = VPXMIN(tx_size, cm->above_txfm_context[mi_col + c]);
+ tx_size_r = AOMMIN(tx_size, cm->above_txfm_context[mi_col + c]);
tx_size_c =
- VPXMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
+ AOMMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
cm->above_txfm_context[mi_col + c] = tx_size;
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] = tx_size;
@@ -1333,7 +1333,7 @@
// Disable filtering on the leftmost column
border_mask = ~(mi_col == 0);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_vert(
CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1350,7 +1350,7 @@
filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
mask_8x8_c & border_mask, mask_4x4_c & border_mask,
mask_4x4_int[r], &cm->lf_info, &lfl[r][0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += MI_SIZE * dst->stride;
mib += row_step * cm->mi_stride;
}
@@ -1374,7 +1374,7 @@
mask_8x8_r = mask_8x8[r];
mask_4x4_r = mask_4x4[r];
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1389,14 +1389,14 @@
filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
&lfl[r][0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += MI_SIZE * dst->stride;
}
}
-void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
- struct macroblockd_plane *const plane,
- int mi_row, LOOP_FILTER_MASK *lfm) {
+void av1_filter_block_plane_ss00(AV1_COMMON *const cm,
+ struct macroblockd_plane *const plane,
+ int mi_row, LOOP_FILTER_MASK *lfm) {
struct buf_2d *const dst = &plane->dst;
uint8_t *const dst0 = dst->buf;
int r;
@@ -1415,7 +1415,7 @@
unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;
// Disable filtering on the leftmost column.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_vert_row2(
plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1430,7 +1430,7 @@
filter_selectively_vert_row2(
plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r][0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 2 * MI_SIZE * dst->stride;
mask_16x16 >>= 2 * MI_SIZE;
mask_8x8 >>= 2 * MI_SIZE;
@@ -1460,7 +1460,7 @@
mask_4x4_r = mask_4x4 & 0xff;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_horiz(
CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1475,7 +1475,7 @@
filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
&lfm->lfl_y[r][0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += MI_SIZE * dst->stride;
mask_16x16 >>= MI_SIZE;
@@ -1485,9 +1485,9 @@
}
}
-void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
- struct macroblockd_plane *const plane,
- int mi_row, LOOP_FILTER_MASK *lfm) {
+void av1_filter_block_plane_ss11(AV1_COMMON *const cm,
+ struct macroblockd_plane *const plane,
+ int mi_row, LOOP_FILTER_MASK *lfm) {
struct buf_2d *const dst = &plane->dst;
uint8_t *const dst0 = dst->buf;
int r, c;
@@ -1514,7 +1514,7 @@
unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;
// Disable filtering on the leftmost column.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_vert_row2(
plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1530,7 +1530,7 @@
filter_selectively_vert_row2(
plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_uv[r >> 1][0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 2 * MI_SIZE * dst->stride;
mask_16x16 >>= MI_SIZE;
@@ -1565,7 +1565,7 @@
mask_4x4_r = mask_4x4 & 0xf;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_horiz(
CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1580,7 +1580,7 @@
filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
&lfm->lfl_uv[r >> 1][0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += MI_SIZE * dst->stride;
mask_16x16 >>= MI_SIZE / 2;
@@ -1590,9 +1590,9 @@
}
}
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- int start, int stop, int y_only) {
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int start, int stop, int y_only) {
#if CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
int mi_row, mi_col;
@@ -1608,11 +1608,11 @@
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += cm->mib_size) {
int plane;
- vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+ av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
for (plane = 0; plane < num_planes; ++plane)
- vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
- mi_col);
+ av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
+ mi_col);
}
}
#else
@@ -1635,23 +1635,23 @@
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MAX_MIB_SIZE) {
int plane;
- vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+ av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
// TODO(JBB): Make setup_mask work for non 420.
- vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+ av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
- vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
for (plane = 1; plane < num_planes; ++plane) {
switch (path) {
case LF_PATH_420:
- vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_444:
- vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_SLOW:
- vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
- mi_row, mi_col);
+ av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+ mi_row, mi_col);
break;
}
}
@@ -1660,9 +1660,9 @@
#endif // CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
}
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
- MACROBLOCKD *xd, int frame_filter_level, int y_only,
- int partial_frame) {
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+ MACROBLOCKD *xd, int frame_filter_level, int y_only,
+ int partial_frame) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
if (!frame_filter_level) return;
start_mi_row = 0;
@@ -1670,17 +1670,16 @@
if (partial_frame && cm->mi_rows > 8) {
start_mi_row = cm->mi_rows >> 1;
start_mi_row &= 0xfffffff8;
- mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+ mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
}
end_mi_row = start_mi_row + mi_rows_to_filter;
- vp10_loop_filter_frame_init(cm, frame_filter_level);
- vp10_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
+ av1_loop_filter_frame_init(cm, frame_filter_level);
+ av1_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
}
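/*
 * Worked example of the partial_frame math above, assuming a 1080p frame
 * (cm->mi_rows == 135 with 8x8 mode-info units): start_mi_row = 135 >> 1 =
 * 67, aligned down to 64; mi_rows_to_filter = AOMMAX(135 / 8, 8) = 16; so
 * only the middle mi rows [64, 80) are filtered.
 */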
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
- struct VP10Common *cm,
- const struct macroblockd_plane planes[MAX_MB_PLANE]) {
+ struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]) {
lf_data->frame_buffer = frame_buffer;
lf_data->cm = cm;
lf_data->start = 0;
@@ -1689,9 +1688,9 @@
memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
}
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
(void)unused;
- vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
- lf_data->start, lf_data->stop, lf_data->y_only);
+ av1_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+ lf_data->start, lf_data->stop, lf_data->y_only);
return 1;
}
diff --git a/av1/common/loopfilter.h b/av1/common/loopfilter.h
index b85ed04..d3377e2 100644
--- a/av1/common/loopfilter.h
+++ b/av1/common/loopfilter.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_LOOPFILTER_H_
-#define VP10_COMMON_LOOPFILTER_H_
+#ifndef AV1_COMMON_LOOPFILTER_H_
+#define AV1_COMMON_LOOPFILTER_H_
#include "aom_ports/mem.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/blockd.h"
#include "av1/common/restoration.h"
@@ -89,49 +89,49 @@
} LOOP_FILTER_MASK;
/* assorted loopfilter functions which get used elsewhere */
-struct VP10Common;
+struct AV1Common;
struct macroblockd;
-struct VP10LfSyncData;
+struct AV1LfSyncData;
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
-void vp10_setup_mask(struct VP10Common *const cm, const int mi_row,
- const int mi_col, MODE_INFO **mi_8x8,
- const int mode_info_stride, LOOP_FILTER_MASK *lfm);
+void av1_setup_mask(struct AV1Common *const cm, const int mi_row,
+ const int mi_col, MODE_INFO **mi_8x8,
+ const int mode_info_stride, LOOP_FILTER_MASK *lfm);
-void vp10_filter_block_plane_ss00(struct VP10Common *const cm,
- struct macroblockd_plane *const plane,
- int mi_row, LOOP_FILTER_MASK *lfm);
+void av1_filter_block_plane_ss00(struct AV1Common *const cm,
+ struct macroblockd_plane *const plane,
+ int mi_row, LOOP_FILTER_MASK *lfm);
-void vp10_filter_block_plane_ss11(struct VP10Common *const cm,
- struct macroblockd_plane *const plane,
- int mi_row, LOOP_FILTER_MASK *lfm);
+void av1_filter_block_plane_ss11(struct AV1Common *const cm,
+ struct macroblockd_plane *const plane,
+ int mi_row, LOOP_FILTER_MASK *lfm);
-void vp10_filter_block_plane_non420(struct VP10Common *cm,
- struct macroblockd_plane *plane,
- MODE_INFO **mi_8x8, int mi_row, int mi_col);
+void av1_filter_block_plane_non420(struct AV1Common *cm,
+ struct macroblockd_plane *plane,
+ MODE_INFO **mi_8x8, int mi_row, int mi_col);
-void vp10_loop_filter_init(struct VP10Common *cm);
+void av1_loop_filter_init(struct AV1Common *cm);
// Update the loop filter for the current frame.
-// This should be called before vp10_loop_filter_rows(),
-// vp10_loop_filter_frame()
+// This should be called before av1_loop_filter_rows();
+// av1_loop_filter_frame()
// calls this function directly.
-void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl);
+void av1_loop_filter_frame_init(struct AV1Common *cm, int default_filt_lvl);
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
- struct macroblockd *mbd, int filter_level,
- int y_only, int partial_frame);
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+ struct macroblockd *mbd, int filter_level,
+ int y_only, int partial_frame);
// Apply the loop filter to [start, stop) macro block rows in frame_buffer.
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
- struct VP10Common *cm,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- int start, int stop, int y_only);
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
+ struct AV1Common *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int start, int stop, int y_only);
typedef struct LoopFilterWorkerData {
YV12_BUFFER_CONFIG *frame_buffer;
- struct VP10Common *cm;
+ struct AV1Common *cm;
struct macroblockd_plane planes[MAX_MB_PLANE];
int start;
@@ -139,14 +139,14 @@
int y_only;
} LFWorkerData;
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
- struct VP10Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
+ struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
// Operates on the rows described by 'lf_data'.
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_LOOPFILTER_H_
+#endif // AV1_COMMON_LOOPFILTER_H_
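
For reference, the comment ordering above implies the following calling
sequence for the renamed API — a minimal sketch, assuming an AV1_COMMON `cm`,
a decoded `frame`, and per-plane descriptors `planes` set up elsewhere:

    #include "av1/common/loopfilter.h"

    /* Sketch: filter all planes of one frame; cm, frame and planes are
     * assumed to be initialized by the caller. */
    static void filter_frame_sketch(struct AV1Common *cm,
                                    YV12_BUFFER_CONFIG *frame,
                                    struct macroblockd_plane planes[MAX_MB_PLANE],
                                    int filt_lvl, int mi_rows) {
      av1_loop_filter_init(cm);                 /* once per codec instance */
      av1_loop_filter_frame_init(cm, filt_lvl); /* once per frame          */
      av1_loop_filter_rows(frame, cm, planes, 0, mi_rows, /*y_only=*/0);
    }
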
diff --git a/av1/common/mips/dspr2/itrans16_dspr2.c b/av1/common/mips/dspr2/itrans16_dspr2.c
index c0b9b2a..9e63d4d 100644
--- a/av1/common/mips/dspr2/itrans16_dspr2.c
+++ b/av1/common/mips/dspr2/itrans16_dspr2.c
@@ -11,8 +11,8 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
#include "av1/common/common.h"
#include "av1/common/blockd.h"
#include "av1/common/idct.h"
@@ -21,8 +21,8 @@
#include "aom_ports/mem.h"
#if HAVE_DSPR2
-void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
- int tx_type) {
+void av1_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
+ int tx_type) {
int i, j;
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
int16_t *outptr = out;
@@ -90,7 +90,7 @@
dest[j * pitch + i]);
}
} break;
- default: printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
+ default: printf("av1_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
}
}
#endif // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans4_dspr2.c b/av1/common/mips/dspr2/itrans4_dspr2.c
index dcb28c9..61fc0e7 100644
--- a/av1/common/mips/dspr2/itrans4_dspr2.c
+++ b/av1/common/mips/dspr2/itrans4_dspr2.c
@@ -11,8 +11,8 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
#include "av1/common/common.h"
#include "av1/common/blockd.h"
#include "av1/common/idct.h"
@@ -21,8 +21,8 @@
#include "aom_ports/mem.h"
#if HAVE_DSPR2
-void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride, int tx_type) {
+void av1_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+ int dest_stride, int tx_type) {
int i, j;
DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
int16_t *outptr = out;
@@ -36,11 +36,11 @@
switch (tx_type) {
case DCT_DCT: // DCT in both horizontal and vertical
- vpx_idct4_rows_dspr2(input, outptr);
- vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
+ aom_idct4_rows_dspr2(input, outptr);
+ aom_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
break;
case ADST_DCT: // ADST in vertical, DCT in horizontal
- vpx_idct4_rows_dspr2(input, outptr);
+ aom_idct4_rows_dspr2(input, outptr);
outptr = out;
@@ -66,7 +66,7 @@
temp_in[i * 4 + j] = out[j * 4 + i];
}
}
- vpx_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
+ aom_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
break;
case ADST_ADST: // ADST in both directions
for (i = 0; i < 4; ++i) {
@@ -84,7 +84,7 @@
ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
}
break;
- default: printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
+ default: printf("av1_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
}
}
#endif // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans8_dspr2.c b/av1/common/mips/dspr2/itrans8_dspr2.c
index 761d6f0..fe99f31 100644
--- a/av1/common/mips/dspr2/itrans8_dspr2.c
+++ b/av1/common/mips/dspr2/itrans8_dspr2.c
@@ -11,8 +11,8 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
#include "av1/common/common.h"
#include "av1/common/blockd.h"
#include "aom_dsp/mips/inv_txfm_dspr2.h"
@@ -20,8 +20,8 @@
#include "aom_ports/mem.h"
#if HAVE_DSPR2
-void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
- int dest_stride, int tx_type) {
+void av1_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+ int dest_stride, int tx_type) {
int i, j;
DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
int16_t *outptr = out;
@@ -78,7 +78,7 @@
ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
}
break;
- default: printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
+ default: printf("av1_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
}
}
#endif // #if HAVE_DSPR2
diff --git a/av1/common/mips/msa/idct16x16_msa.c b/av1/common/mips/msa/idct16x16_msa.c
index baa3a97..e5a68fa 100644
--- a/av1/common/mips/msa/idct16x16_msa.c
+++ b/av1/common/mips/msa/idct16x16_msa.c
@@ -13,8 +13,8 @@
#include "av1/common/enums.h"
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
- int32_t dst_stride, int32_t tx_type) {
+void av1_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+ int32_t dst_stride, int32_t tx_type) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
int16_t *out_ptr = &out[0];
@@ -24,13 +24,13 @@
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ aom_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
/* process 8 * 16 block */
- vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+ aom_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
dst_stride);
}
break;
@@ -38,12 +38,12 @@
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ aom_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
- vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+ aom_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
(dst + (i << 3)), dst_stride);
}
break;
@@ -51,13 +51,13 @@
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ aom_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
/* process 8 * 16 block */
- vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+ aom_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
dst_stride);
}
break;
@@ -65,12 +65,12 @@
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+ aom_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
- vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+ aom_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
(dst + (i << 3)), dst_stride);
}
break;
diff --git a/av1/common/mips/msa/idct4x4_msa.c b/av1/common/mips/msa/idct4x4_msa.c
index 0620df7..7b4ba12 100644
--- a/av1/common/mips/msa/idct4x4_msa.c
+++ b/av1/common/mips/msa/idct4x4_msa.c
@@ -13,8 +13,8 @@
#include "av1/common/enums.h"
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
- int32_t dst_stride, int32_t tx_type) {
+void av1_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+ int32_t dst_stride, int32_t tx_type) {
v8i16 in0, in1, in2, in3;
/* load vector elements of 4x4 block */
@@ -24,31 +24,31 @@
switch (tx_type) {
case DCT_DCT:
/* DCT in horizontal */
- VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
/* DCT in vertical */
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
case ADST_DCT:
/* DCT in horizontal */
- VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
/* ADST in vertical */
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
case DCT_ADST:
/* ADST in horizontal */
- VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
/* DCT in vertical */
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
case ADST_ADST:
/* ADST in horizontal */
- VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
/* ADST in vertical */
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
default: assert(0); break;
}
diff --git a/av1/common/mips/msa/idct8x8_msa.c b/av1/common/mips/msa/idct8x8_msa.c
index 5c62c4a..ce61676 100644
--- a/av1/common/mips/msa/idct8x8_msa.c
+++ b/av1/common/mips/msa/idct8x8_msa.c
@@ -13,8 +13,8 @@
#include "av1/common/enums.h"
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
- int32_t dst_stride, int32_t tx_type) {
+void av1_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+ int32_t dst_stride, int32_t tx_type) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
/* load vector elements of 8x8 block */
@@ -26,42 +26,42 @@
switch (tx_type) {
case DCT_DCT:
/* DCT in horizontal */
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* DCT in vertical */
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
break;
case ADST_DCT:
/* DCT in horizontal */
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* ADST in vertical */
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case DCT_ADST:
/* ADST in horizontal */
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
/* DCT in vertical */
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
break;
case ADST_ADST:
/* ADST in horizontal */
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
/* ADST in vertical */
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
default: assert(0); break;
@@ -72,7 +72,7 @@
SRARI_H4_SH(in4, in5, in6, in7, 5);
/* add block and store 8x8 */
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
dst += (4 * dst_stride);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
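
All three MSA kernels above share the same shape: a row pass, a transpose,
then a column pass, with DCT or ADST selected per direction by tx_type. A
hedged usage sketch for the renamed 4x4 entry point (`input` and `dst` are
assumed caller-provided buffers):

    /* input: 16 dequantized int16_t coefficients; dst: 8-bit reconstruction. */
    av1_iht4x4_16_add_msa(input, dst, dst_stride, DCT_DCT);   /* 2-D DCT  */
    av1_iht4x4_16_add_msa(input, dst, dst_stride, ADST_ADST); /* 2-D ADST */
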
diff --git a/av1/common/mv.h b/av1/common/mv.h
index dba3336..4908d74 100644
--- a/av1/common/mv.h
+++ b/av1/common/mv.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_MV_H_
-#define VP10_COMMON_MV_H_
+#ifndef AV1_COMMON_MV_H_
+#define AV1_COMMON_MV_H_
#include "av1/common/common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
#if CONFIG_GLOBAL_MOTION
#include "av1/common/warped_motion.h"
#endif // CONFIG_GLOBAL_MOTION
@@ -146,4 +146,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_MV_H_
+#endif // AV1_COMMON_MV_H_
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 836b065..e14df3c 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -146,7 +146,7 @@
return newmv_count;
}
-static uint8_t scan_row_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_row_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const int mi_row, const int mi_col, int block,
const MV_REFERENCE_FRAME rf[2], int row_offset,
CANDIDATE_MV *ref_mv_stack, uint8_t *refmv_count) {
@@ -164,7 +164,7 @@
xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
const int len =
- VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[candidate->sb_type]);
+ AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[candidate->sb_type]);
newmv_count += add_ref_mv_candidate(
candidate_mi, candidate, rf, refmv_count, ref_mv_stack,
@@ -178,7 +178,7 @@
return newmv_count;
}
-static uint8_t scan_col_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_col_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const int mi_row, const int mi_col, int block,
const MV_REFERENCE_FRAME rf[2], int col_offset,
CANDIDATE_MV *ref_mv_stack, uint8_t *refmv_count) {
@@ -196,7 +196,7 @@
xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
const int len =
- VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[candidate->sb_type]);
+ AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[candidate->sb_type]);
newmv_count += add_ref_mv_candidate(
candidate_mi, candidate, rf, refmv_count, ref_mv_stack,
@@ -210,7 +210,7 @@
return newmv_count;
}
-static uint8_t scan_blk_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_blk_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const int mi_row, const int mi_col, int block,
const MV_REFERENCE_FRAME rf[2], int row_offset,
int col_offset, CANDIDATE_MV *ref_mv_stack,
@@ -288,7 +288,7 @@
for (rf = 0; rf < 2; ++rf) {
if (candidate->ref_frame[rf] == ref_frame) {
- const int list_range = VPXMIN(refmv_count, MAX_MV_REF_CANDIDATES);
+ const int list_range = AOMMIN(refmv_count, MAX_MV_REF_CANDIDATES);
const int_mv pred_mv = candidate->mv[rf];
for (idx = 0; idx < list_range; ++idx)
@@ -304,7 +304,7 @@
}
}
-static void setup_ref_mv_list(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref_frame,
uint8_t *refmv_count, CANDIDATE_MV *ref_mv_stack,
int_mv *mv_ref_list, int block, int mi_row,
@@ -320,11 +320,11 @@
? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
: NULL;
- int bs = VPXMAX(xd->n8_w, xd->n8_h);
+ int bs = AOMMAX(xd->n8_w, xd->n8_h);
int has_tr = has_top_right(xd, mi_row, mi_col, bs);
MV_REFERENCE_FRAME rf[2];
- vp10_set_ref_frame(rf, ref_frame);
+ av1_set_ref_frame(rf, ref_frame);
mode_context[ref_frame] = 0;
*refmv_count = 0;
@@ -502,7 +502,7 @@
xd->n8_h << 3, xd);
}
} else {
- for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
+ for (idx = 0; idx < AOMMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
mv_ref_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
clamp_mv_ref(&mv_ref_list[idx].as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
}
@@ -512,7 +512,7 @@
// This function searches the neighbourhood of a given MB/SB
// to try and find candidate reference vectors.
-static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list, int block, int mi_row,
int mi_col, find_mv_refs_sync sync,
@@ -648,10 +648,10 @@
#if CONFIG_EXT_INTER
// This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
- MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
- int block, int mi_row, int mi_col,
- int16_t *mode_context) {
+void av1_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
+ MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
+ int block, int mi_row, int mi_col,
+ int16_t *mode_context) {
int i, refmv_count = 0;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
int context_counter = 0;
@@ -691,26 +691,26 @@
}
#endif // CONFIG_EXT_INTER
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
#if CONFIG_REF_MV
- uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
+ uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
#if CONFIG_EXT_INTER
- int16_t *compound_mode_context,
+ int16_t *compound_mode_context,
#endif // CONFIG_EXT_INTER
#endif
- int_mv *mv_ref_list, int mi_row, int mi_col,
- find_mv_refs_sync sync, void *const data,
- int16_t *mode_context) {
+ int_mv *mv_ref_list, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data,
+ int16_t *mode_context) {
#if CONFIG_REF_MV
int idx, all_zero = 1;
#endif
#if CONFIG_EXT_INTER
- vp10_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col,
+ av1_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col,
#if CONFIG_REF_MV
- compound_mode_context);
+ compound_mode_context);
#else
- mode_context);
+ mode_context);
#endif // CONFIG_REF_MV
find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col, sync,
data, NULL);
@@ -730,8 +730,8 @@
#endif
}
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
- int_mv *near_mv) {
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+ int_mv *near_mv) {
int i;
// Make sure all the candidates are properly clamped etc
for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
@@ -741,16 +741,16 @@
*near_mv = mvlist[1];
}
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
- int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
+ int ref, int mi_row, int mi_col,
#if CONFIG_REF_MV
- CANDIDATE_MV *ref_mv_stack,
- uint8_t *ref_mv_count,
+ CANDIDATE_MV *ref_mv_stack,
+ uint8_t *ref_mv_count,
#endif
#if CONFIG_EXT_INTER
- int_mv *mv_list,
+ int_mv *mv_list,
#endif // CONFIG_EXT_INTER
- int_mv *nearest_mv, int_mv *near_mv) {
+ int_mv *nearest_mv, int_mv *near_mv) {
#if !CONFIG_EXT_INTER
int_mv mv_list[MAX_MV_REF_CANDIDATES];
#endif // !CONFIG_EXT_INTER
@@ -789,7 +789,7 @@
clamp_mv_ref(&ref_mv_stack[idx].this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3,
xd);
- for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
+ for (idx = 0; idx < AOMMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
mv_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
#endif
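
A short usage sketch for the renamed av1_find_best_ref_mvs(): it clamps and
precision-reduces the candidate list in place, then returns the first two
entries as the NEARESTMV/NEARMV predictors (`mvlist` here would be filled by
av1_find_mv_refs() beforehand):

    int_mv mvlist[MAX_MV_REF_CANDIDATES];  /* populated by av1_find_mv_refs() */
    int_mv nearest_mv, near_mv;
    /* allow_hp = 1 keeps 1/8-pel precision where av1_use_mv_hp() allows it. */
    av1_find_best_ref_mvs(1, mvlist, &nearest_mv, &near_mv);
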
diff --git a/av1/common/mvref_common.h b/av1/common/mvref_common.h
index babd4f0..b65509a 100644
--- a/av1/common/mvref_common.h
+++ b/av1/common/mvref_common.h
@@ -7,8 +7,8 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_MVREF_COMMON_H_
-#define VP10_COMMON_MVREF_COMMON_H_
+#ifndef AV1_COMMON_MVREF_COMMON_H_
+#define AV1_COMMON_MVREF_COMMON_H_
#include "av1/common/onyxc_int.h"
#include "av1/common/blockd.h"
@@ -340,7 +340,7 @@
}
static INLINE void lower_mv_precision(MV *mv, int allow_hp) {
- const int use_hp = allow_hp && vp10_use_mv_hp(mv);
+ const int use_hp = allow_hp && av1_use_mv_hp(mv);
if (!use_hp) {
if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
@@ -348,8 +348,8 @@
}
#if CONFIG_REF_MV
-static INLINE int vp10_nmv_ctx(const uint8_t ref_mv_count,
- const CANDIDATE_MV *ref_mv_stack) {
+static INLINE int av1_nmv_ctx(const uint8_t ref_mv_count,
+ const CANDIDATE_MV *ref_mv_stack) {
#if CONFIG_EXT_INTER
return 0;
#endif
@@ -365,7 +365,7 @@
return 0;
}
-static INLINE int8_t vp10_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
+static INLINE int8_t av1_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
if (rf[1] > INTRA_FRAME) {
return TOTAL_REFS_PER_FRAME + FWD_RF_OFFSET(rf[0]) +
BWD_RF_OFFSET(rf[1]) * FWD_REFS;
@@ -386,8 +386,8 @@
#endif
};
-static INLINE void vp10_set_ref_frame(MV_REFERENCE_FRAME *rf,
- int8_t ref_frame_type) {
+static INLINE void av1_set_ref_frame(MV_REFERENCE_FRAME *rf,
+ int8_t ref_frame_type) {
if (ref_frame_type >= TOTAL_REFS_PER_FRAME) {
rf[0] = ref_frame_map[ref_frame_type - TOTAL_REFS_PER_FRAME][0];
rf[1] = ref_frame_map[ref_frame_type - TOTAL_REFS_PER_FRAME][1];
@@ -399,7 +399,7 @@
}
}
-static INLINE int16_t vp10_mode_context_analyzer(
+static INLINE int16_t av1_mode_context_analyzer(
const int16_t *const mode_context, const MV_REFERENCE_FRAME *const rf,
BLOCK_SIZE bsize, int block) {
int16_t mode_ctx = 0;
@@ -420,8 +420,8 @@
return mode_context[rf[0]];
}
-static INLINE uint8_t vp10_drl_ctx(const CANDIDATE_MV *ref_mv_stack,
- int ref_idx) {
+static INLINE uint8_t av1_drl_ctx(const CANDIDATE_MV *ref_mv_stack,
+ int ref_idx) {
if (ref_mv_stack[ref_idx].weight >= REF_CAT_LEVEL &&
ref_mv_stack[ref_idx + 1].weight >= REF_CAT_LEVEL) {
if (ref_mv_stack[ref_idx].weight == ref_mv_stack[ref_idx + 1].weight)
@@ -447,45 +447,45 @@
#endif
typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
#if CONFIG_REF_MV
- uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
+ uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
#if CONFIG_EXT_INTER
- int16_t *compound_mode_context,
+ int16_t *compound_mode_context,
#endif // CONFIG_EXT_INTER
#endif
- int_mv *mv_ref_list, int mi_row, int mi_col,
- find_mv_refs_sync sync, void *const data,
- int16_t *mode_context);
+ int_mv *mv_ref_list, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data,
+ int16_t *mode_context);
// Check a list of motion vectors by SAD score, using a number of rows of
// pixels above and a number of columns of pixels to the left, to select the
// one with the best score to use as the reference motion vector.
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
- int_mv *near_mv);
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+ int_mv *near_mv);
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
- int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
+ int ref, int mi_row, int mi_col,
#if CONFIG_REF_MV
- CANDIDATE_MV *ref_mv_stack,
- uint8_t *ref_mv_count,
+ CANDIDATE_MV *ref_mv_stack,
+ uint8_t *ref_mv_count,
#endif
#if CONFIG_EXT_INTER
- int_mv *mv_list,
+ int_mv *mv_list,
#endif // CONFIG_EXT_INTER
- int_mv *nearest_mv, int_mv *near_mv);
+ int_mv *nearest_mv, int_mv *near_mv);
#if CONFIG_EXT_INTER
// This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
- MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
- int block, int mi_row, int mi_col,
- int16_t *mode_context);
+void av1_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
+ MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
+ int block, int mi_row, int mi_col,
+ int16_t *mode_context);
#endif // CONFIG_EXT_INTER
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_MVREF_COMMON_H_
+#endif // AV1_COMMON_MVREF_COMMON_H_
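
lower_mv_precision() above moves each odd (1/8-pel) component one step toward
zero; with allow_hp == 0 the rounding always applies, regardless of
av1_use_mv_hp(). A worked instance:

    MV mv = { .row = -5, .col = 7 };      /* 1/8-pel units */
    lower_mv_precision(&mv, /*allow_hp=*/0);
    /* mv.row == -4, mv.col == 6: odd magnitudes shrink by one step. */
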
diff --git a/av1/common/odintrin.h b/av1/common/odintrin.h
index 87b1a36..8e9b3e4 100644
--- a/av1/common/odintrin.h
+++ b/av1/common/odintrin.h
@@ -1,9 +1,9 @@
-#ifndef VP10_COMMON_ODINTRIN_H_
-#define VP10_COMMON_ODINTRIN_H_
+#ifndef AV1_COMMON_ODINTRIN_H_
+#define AV1_COMMON_ODINTRIN_H_
#include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/bitops.h"
/*Smallest blocks are 4x4*/
@@ -33,7 +33,7 @@
#define OD_DIVU(_x, _d) \
(((_d) < OD_DIVU_DMAX) ? (OD_DIVU_SMALL((_x), (_d))) : ((_x) / (_d)))
-#define OD_MINI VPXMIN
+#define OD_MINI AOMMIN
#define OD_CLAMPI(min, val, max) clamp((val), (min), (max))
#define OD_CLZ0 (1)
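
OD_DIVU above chooses a table-driven reciprocal multiply (OD_DIVU_SMALL) when
the divisor is below OD_DIVU_DMAX and falls back to a hardware divide
otherwise; both branches compute the same unsigned quotient. A sketch
(assuming 1 << 20 exceeds the header's OD_DIVU_DMAX bound):

    uint32_t x = 12345u;
    uint32_t q1 = OD_DIVU(x, 3);        /* small divisor: reciprocal table */
    uint32_t q2 = OD_DIVU(x, 1u << 20); /* large divisor: plain x / d      */
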
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 55a8112..d3bc820 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_ONYXC_INT_H_
-#define VP10_COMMON_ONYXC_INT_H_
+#ifndef AV1_COMMON_ONYXC_INT_H_
+#define AV1_COMMON_ONYXC_INT_H_
-#include "./vpx_config.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom_util/vpx_thread.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom_util/aom_thread.h"
+#include "./av1_rtcd.h"
#include "av1/common/alloccommon.h"
#include "av1/common/loopfilter.h"
#include "av1/common/entropymv.h"
@@ -87,14 +87,14 @@
MV_REF *mvs;
int mi_rows;
int mi_cols;
- vpx_codec_frame_buffer_t raw_frame_buffer;
+ aom_codec_frame_buffer_t raw_frame_buffer;
YV12_BUFFER_CONFIG buf;
// The following variables will only be used in frame parallel decode.
// frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
// that no FrameWorker owns, or is decoding, this buffer.
- VPxWorker *frame_worker_owner;
+ AVxWorker *frame_worker_owner;
// row and col indicate the position to which the frame has been decoded, in
// real pixel units. They are reset to -1 when decoding begins and set to INT_MAX
@@ -114,8 +114,8 @@
// Private data associated with the frame buffer callbacks.
void *cb_priv;
- vpx_get_frame_buffer_cb_fn_t get_fb_cb;
- vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+ aom_get_frame_buffer_cb_fn_t get_fb_cb;
+ aom_release_frame_buffer_cb_fn_t release_fb_cb;
RefCntBuffer frame_bufs[FRAME_BUFFERS];
@@ -123,9 +123,9 @@
InternalFrameBufferList int_frame_buffers;
} BufferPool;
-typedef struct VP10Common {
- struct vpx_internal_error_info error;
- vpx_color_space_t color_space;
+typedef struct AV1Common {
+ struct aom_internal_error_info error;
+ aom_color_space_t color_space;
int color_range;
int width;
int height;
@@ -140,7 +140,7 @@
int subsampling_x;
int subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Marks if we need to use 16bit frame buffers (1: yes, 0: no).
int use_highbitdepth;
#endif
@@ -247,9 +247,9 @@
MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
// Separate mi functions between encoder and decoder.
- int (*alloc_mi)(struct VP10Common *cm, int mi_size);
- void (*free_mi)(struct VP10Common *cm);
- void (*setup_mi)(struct VP10Common *cm);
+ int (*alloc_mi)(struct AV1Common *cm, int mi_size);
+ void (*free_mi)(struct AV1Common *cm);
+ void (*setup_mi)(struct AV1Common *cm);
// Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible
// area will be NULL.
@@ -307,7 +307,7 @@
#if CONFIG_ENTROPY
// The initial probabilities for a frame, before any subframe backward update,
// and after forward update.
- vp10_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
+ av1_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
// Number of subframe backward updates already done
uint8_t coef_probs_update_idx;
// Signal if the backward update is subframe or end-of-frame
@@ -319,9 +319,9 @@
unsigned int current_video_frame;
BITSTREAM_PROFILE profile;
- // VPX_BITS_8 in profile 0 or 1, VPX_BITS_10 or VPX_BITS_12 in profile 2 or 3.
- vpx_bit_depth_t bit_depth;
- vpx_bit_depth_t dequant_bit_depth; // bit_depth of current dequantizer
+ // AOM_BITS_8 in profile 0 or 1, AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
+ aom_bit_depth_t bit_depth;
+ aom_bit_depth_t dequant_bit_depth; // bit_depth of current dequantizer
int error_resilient_mode;
@@ -336,8 +336,8 @@
// Private data associated with the frame buffer callbacks.
void *cb_priv;
- vpx_get_frame_buffer_cb_fn_t get_fb_cb;
- vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+ aom_get_frame_buffer_cb_fn_t get_fb_cb;
+ aom_release_frame_buffer_cb_fn_t release_fb_cb;
// Handles memory for the codec.
InternalFrameBufferList int_frame_buffers;
@@ -356,7 +356,7 @@
// scratch memory for intraonly/keyframe forward updates from default tables
// - this is intentionally not placed in FRAME_CONTEXT since it's reset upon
// each keyframe and not used afterwards
- vpx_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+ aom_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
#if CONFIG_GLOBAL_MOTION
Global_Motion_Params global_motion[TOTAL_REFS_PER_FRAME];
#endif
@@ -367,7 +367,7 @@
#if CONFIG_DERING
int dering_level;
#endif
-} VP10_COMMON;
+} AV1_COMMON;
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
@@ -387,7 +387,7 @@
#endif
}
-static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) {
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
if (index < 0 || index >= REF_FRAMES) return NULL;
if (cm->ref_frame_map[index] < 0) return NULL;
assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
@@ -395,11 +395,11 @@
}
static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(
- const VP10_COMMON *const cm) {
+ const AV1_COMMON *const cm) {
return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
-static INLINE int get_free_fb(VP10_COMMON *cm) {
+static INLINE int get_free_fb(AV1_COMMON *cm) {
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
@@ -429,20 +429,20 @@
bufs[new_idx].ref_count++;
}
-static INLINE int mi_cols_aligned_to_sb(const VP10_COMMON *cm) {
+static INLINE int mi_cols_aligned_to_sb(const AV1_COMMON *cm) {
return ALIGN_POWER_OF_TWO(cm->mi_cols, cm->mib_size_log2);
}
-static INLINE int mi_rows_aligned_to_sb(const VP10_COMMON *cm) {
+static INLINE int mi_rows_aligned_to_sb(const AV1_COMMON *cm) {
return ALIGN_POWER_OF_TWO(cm->mi_rows, cm->mib_size_log2);
}
-static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) {
+static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
return cm->frame_type == KEY_FRAME || cm->intra_only;
}
-static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd,
- tran_low_t *dqcoeff) {
+static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
+ tran_low_t *dqcoeff) {
int i;
for (i = 0; i < MAX_MB_PLANE; ++i) {
xd->plane[i].dqcoeff = dqcoeff;
@@ -536,13 +536,13 @@
#endif
}
-static INLINE const vpx_prob *get_y_mode_probs(const VP10_COMMON *cm,
+static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
- const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block);
- const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block);
+ const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
+ const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
return cm->kf_y_prob[above][left];
}
@@ -622,8 +622,8 @@
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}
-static INLINE void vp10_zero_above_context(VP10_COMMON *const cm,
- int mi_col_start, int mi_col_end) {
+static INLINE void av1_zero_above_context(AV1_COMMON *const cm,
+ int mi_col_start, int mi_col_end) {
const int width = mi_col_end - mi_col_start;
const int offset_y = 2 * mi_col_start;
@@ -631,22 +631,22 @@
const int offset_uv = offset_y >> cm->subsampling_x;
const int width_uv = width_y >> cm->subsampling_x;
- vp10_zero_array(cm->above_context[0] + offset_y, width_y);
- vp10_zero_array(cm->above_context[1] + offset_uv, width_uv);
- vp10_zero_array(cm->above_context[2] + offset_uv, width_uv);
+ av1_zero_array(cm->above_context[0] + offset_y, width_y);
+ av1_zero_array(cm->above_context[1] + offset_uv, width_uv);
+ av1_zero_array(cm->above_context[2] + offset_uv, width_uv);
- vp10_zero_array(cm->above_seg_context + mi_col_start, width);
+ av1_zero_array(cm->above_seg_context + mi_col_start, width);
#if CONFIG_VAR_TX
- vp10_zero_array(cm->above_txfm_context + mi_col_start, width);
+ av1_zero_array(cm->above_txfm_context + mi_col_start, width);
#endif // CONFIG_VAR_TX
}
-static INLINE void vp10_zero_left_context(MACROBLOCKD *const xd) {
- vp10_zero(xd->left_context);
- vp10_zero(xd->left_seg_context);
+static INLINE void av1_zero_left_context(MACROBLOCKD *const xd) {
+ av1_zero(xd->left_context);
+ av1_zero(xd->left_seg_context);
#if CONFIG_VAR_TX
- vp10_zero(xd->left_txfm_context_buffer);
+ av1_zero(xd->left_txfm_context_buffer);
#endif
}
@@ -684,7 +684,7 @@
}
#endif
-static INLINE PARTITION_TYPE get_partition(const VP10_COMMON *const cm,
+static INLINE PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
const int mi_row, const int mi_col,
const BLOCK_SIZE bsize) {
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
@@ -726,8 +726,7 @@
}
}
-static INLINE void set_sb_size(VP10_COMMON *const cm,
- const BLOCK_SIZE sb_size) {
+static INLINE void set_sb_size(AV1_COMMON *const cm, const BLOCK_SIZE sb_size) {
cm->sb_size = sb_size;
cm->mib_size = num_8x8_blocks_wide_lookup[cm->sb_size];
cm->mib_size_log2 = mi_width_log2_lookup[cm->sb_size];
@@ -737,4 +736,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ONYXC_INT_H_
+#endif // AV1_COMMON_ONYXC_INT_H_
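
mi_cols_aligned_to_sb() above rounds the mi-unit frame width up to a whole
number of superblocks via ALIGN_POWER_OF_TWO; for a 64x64 superblock (eight
8x8 mi units, so mib_size_log2 == 3) the arithmetic works out as:

    /* ALIGN_POWER_OF_TWO(v, n) == (v + (1 << n) - 1) & ~((1 << n) - 1) */
    int mi_cols = 53;                 /* e.g. a 424-pixel-wide frame       */
    int aligned = (mi_cols + 7) & ~7; /* -> 56 columns, i.e. 7 superblocks */
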
diff --git a/av1/common/pred_common.c b/av1/common/pred_common.c
index 0e1045e..6fe1188 100644
--- a/av1/common/pred_common.c
+++ b/av1/common/pred_common.c
@@ -34,7 +34,7 @@
return ref_type;
}
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int ctx_offset =
(mbmi->ref_frame[1] > INTRA_FRAME) * INTER_FILTER_COMP_OFFSET;
@@ -67,7 +67,7 @@
return filter_type_ctx;
}
#else
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries corresponding to real macroblocks.
@@ -115,7 +115,7 @@
if (mode != DC_PRED && mode != TM_PRED) {
int p_angle =
mode_to_angle_map[mode] + ref_mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle)) {
+ if (av1_is_intra_filter_switchable(p_angle)) {
ref_type = ref_mbmi->intra_filter;
}
}
@@ -124,7 +124,7 @@
return ref_type;
}
-int vp10_get_pred_context_intra_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_intra_interp(const MACROBLOCKD *xd) {
int left_type = INTRA_FILTERS, above_type = INTRA_FILTERS;
if (xd->left_available) left_type = get_ref_intra_filter(xd->left_mbmi);
@@ -149,7 +149,7 @@
// 1 - intra/inter, inter/intra
// 2 - intra/--, --/intra
// 3 - intra/intra
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
+int av1_get_intra_inter_context(const MACROBLOCKD *xd) {
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int has_above = xd->up_available;
@@ -171,8 +171,8 @@
#define CHECK_BWDREF_OR_ALTREF(ref_frame) \
(((ref_frame) == BWDREF_FRAME) || ((ref_frame) == ALTREF_FRAME))
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -218,8 +218,8 @@
#else // CONFIG_EXT_REFS
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -279,8 +279,8 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is either
// GOLDEN_FRAME or LAST3_FRAME.
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -379,8 +379,8 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is LAST_FRAME,
// conditioning on it is either LAST_FRAME or LAST2_FRAME.
-int vp10_get_pred_context_comp_ref_p1(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -480,8 +480,8 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is GOLDEN_FRAME,
// conditioning on it is either GOLDEN or LAST3.
-int vp10_get_pred_context_comp_ref_p2(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -575,8 +575,8 @@
}
// Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_bwdref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_bwdref_p(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -687,8 +687,8 @@
#else // CONFIG_EXT_REFS
// Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -777,7 +777,7 @@
// or a BWDREF_FRAME.
//
// NOTE(zoeliu): The probability of ref_frame[0] is ALTREF/BWDREF.
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -856,7 +856,7 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is ALTREF_FRAME, conditioning
// on it is either ALTREF_FRAME/BWDREF_FRAME.
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -950,7 +950,7 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is LAST3/GOLDEN, conditioning
// on it is either LAST3/GOLDEN/LAST2/LAST.
-int vp10_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1047,7 +1047,7 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is LAST2_FRAME, conditioning
// on it is either LAST2_FRAME/LAST_FRAME.
-int vp10_get_pred_context_single_ref_p4(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p4(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1139,7 +1139,7 @@
//
// NOTE(zoeliu): The probability of ref_frame[0] is GOLDEN_FRAME, conditioning
// on it is either GOLDEN_FRAME/LAST3_FRAME.
-int vp10_get_pred_context_single_ref_p5(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p5(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1229,7 +1229,7 @@
#else // CONFIG_EXT_REFS
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1295,7 +1295,7 @@
return pred_context;
}
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
diff --git a/av1/common/pred_common.h b/av1/common/pred_common.h
index 9a3e3f1..5873bf0 100644
--- a/av1/common/pred_common.h
+++ b/av1/common/pred_common.h
@@ -8,37 +8,37 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_PRED_COMMON_H_
-#define VP10_COMMON_PRED_COMMON_H_
+#ifndef AV1_COMMON_PRED_COMMON_H_
+#define AV1_COMMON_PRED_COMMON_H_
#include "av1/common/blockd.h"
#include "av1/common/onyxc_int.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#ifdef __cplusplus
extern "C" {
#endif
-static INLINE int get_segment_id(const VP10_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *cm,
const uint8_t *segment_ids, BLOCK_SIZE bsize,
int mi_row, int mi_col) {
const int mi_offset = mi_row * cm->mi_cols + mi_col;
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int bh = num_8x8_blocks_high_lookup[bsize];
- const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
- const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
+ const int xmis = AOMMIN(cm->mi_cols - mi_col, bw);
+ const int ymis = AOMMIN(cm->mi_rows - mi_row, bh);
int x, y, segment_id = MAX_SEGMENTS;
for (y = 0; y < ymis; ++y)
for (x = 0; x < xmis; ++x)
segment_id =
- VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+ AOMMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
return segment_id;
}
-static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
+static INLINE int av1_get_pred_context_seg_id(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_sip =
@@ -48,12 +48,12 @@
return above_sip + left_sip;
}
-static INLINE vpx_prob vp10_get_pred_prob_seg_id(
+static INLINE aom_prob av1_get_pred_prob_seg_id(
const struct segmentation_probs *segp, const MACROBLOCKD *xd) {
- return segp->pred_probs[vp10_get_pred_context_seg_id(xd)];
+ return segp->pred_probs[av1_get_pred_context_seg_id(xd)];
}
-static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
+static INLINE int av1_get_skip_context(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
@@ -61,109 +61,108 @@
return above_skip + left_skip;
}
-static INLINE vpx_prob vp10_get_skip_prob(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->skip_probs[vp10_get_skip_context(xd)];
+static INLINE aom_prob av1_get_skip_prob(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->skip_probs[av1_get_skip_context(xd)];
}
#if CONFIG_DUAL_FILTER
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir);
#else
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
#endif
#if CONFIG_EXT_INTRA
-int vp10_get_pred_context_intra_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_intra_interp(const MACROBLOCKD *xd);
#endif // CONFIG_EXT_INTRA
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd);
+int av1_get_intra_inter_context(const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)];
+static INLINE aom_prob av1_get_intra_inter_prob(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->intra_inter_prob[av1_get_intra_inter_context(xd)];
}
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm, const MACROBLOCKD *xd);
+
+static INLINE aom_prob av1_get_reference_mode_prob(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->comp_inter_prob[av1_get_reference_mode_context(cm, xd)];
+}
+
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)];
-}
-
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd);
-
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd);
+ const int pred_context = av1_get_pred_context_comp_ref_p(cm, xd);
return cm->fc->comp_ref_prob[pred_context][0];
}
#if CONFIG_EXT_REFS
-int vp10_get_pred_context_comp_ref_p1(const VP10_COMMON *cm,
- const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p1(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- const int pred_context = vp10_get_pred_context_comp_ref_p1(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p1(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = av1_get_pred_context_comp_ref_p1(cm, xd);
return cm->fc->comp_ref_prob[pred_context][1];
}
-int vp10_get_pred_context_comp_ref_p2(const VP10_COMMON *cm,
- const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p2(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- const int pred_context = vp10_get_pred_context_comp_ref_p2(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p2(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = av1_get_pred_context_comp_ref_p2(cm, xd);
return cm->fc->comp_ref_prob[pred_context][2];
}
-int vp10_get_pred_context_comp_bwdref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_bwdref_p(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_comp_bwdref_p(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- const int pred_context = vp10_get_pred_context_comp_bwdref_p(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_bwdref_p(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = av1_get_pred_context_comp_bwdref_p(cm, xd);
return cm->fc->comp_bwdref_prob[pred_context][0];
}
#endif // CONFIG_EXT_REFS
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p1(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p1(xd)][0];
}
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p2(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p2(xd)][1];
}
#if CONFIG_EXT_REFS
-int vp10_get_pred_context_single_ref_p3(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p3(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p3(xd)][2];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p3(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p3(xd)][2];
}
-int vp10_get_pred_context_single_ref_p4(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p4(const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p4(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p4(xd)][3];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p4(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p4(xd)][3];
}
-int vp10_get_pred_context_single_ref_p5(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p5(const MACROBLOCKD *xd);
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p5(const VP10_COMMON *cm,
- const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p5(xd)][4];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p5(const AV1_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p5(xd)][4];
}
#endif // CONFIG_EXT_REFS
@@ -192,7 +191,7 @@
}
#if CONFIG_VAR_TX
-static void update_tx_counts(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void update_tx_counts(AV1_COMMON *cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, int blk_row, int blk_col,
TX_SIZE max_tx_size, int ctx) {
@@ -232,7 +231,7 @@
}
}
-static INLINE void inter_block_tx_count_update(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void inter_block_tx_count_update(AV1_COMMON *cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi,
BLOCK_SIZE plane_bsize,
int ctx) {
@@ -254,4 +253,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_PRED_COMMON_H_
+#endif // AV1_COMMON_PRED_COMMON_H_
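
Most of the renamed helpers above follow one pattern: sum a boolean from the
above and left neighbours into a context in {0, 1, 2}, then use it to index a
probability table. The skip pair is the simplest instance (`cm` and `xd` as
in the surrounding code):

    const int ctx = av1_get_skip_context(xd);   /* 0, 1 or 2                  */
    const aom_prob p = cm->fc->skip_probs[ctx]; /* == av1_get_skip_prob(cm,xd) */
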
diff --git a/av1/common/quant_common.c b/av1/common/quant_common.c
index 79d8fb8..3adfa7b 100644
--- a/av1/common/quant_common.c
+++ b/av1/common/quant_common.c
@@ -130,8 +130,8 @@
cuml_bins[i] = ROUND_POWER_OF_TWO(cuml_knots[i] * q, 7);
}
-void vp10_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
- tran_low_t *cuml_bins, int q_profile) {
+void av1_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
+ tran_low_t *cuml_bins, int q_profile) {
const uint8_t *knots = get_nuq_knots(qindex, band, q_profile);
tran_low_t cuml_bins_[NUQ_KNOTS], *cuml_bins_ptr;
tran_low_t doff;
@@ -150,15 +150,15 @@
cuml_bins_ptr[NUQ_KNOTS - 1] + ROUND_POWER_OF_TWO((64 - doff) * q, 7);
}
-tran_low_t vp10_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq) {
+tran_low_t av1_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq) {
if (v <= NUQ_KNOTS)
return dq[v];
else
return dq[NUQ_KNOTS] + (v - NUQ_KNOTS) * q;
}
-tran_low_t vp10_dequant_coeff_nuq(int v, int q, const tran_low_t *dq) {
- tran_low_t dqmag = vp10_dequant_abscoeff_nuq(abs(v), q, dq);
+tran_low_t av1_dequant_coeff_nuq(int v, int q, const tran_low_t *dq) {
+ tran_low_t dqmag = av1_dequant_abscoeff_nuq(abs(v), q, dq);
return (v < 0 ? -dqmag : dqmag);
}
#endif // CONFIG_NEW_QUANT
@@ -185,7 +185,7 @@
1184, 1232, 1282, 1336,
};
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const int16_t dc_qlookup_10[QINDEX_RANGE] = {
4, 9, 10, 13, 15, 17, 20, 22, 25, 28, 31, 34, 37,
40, 43, 47, 50, 53, 57, 60, 64, 68, 71, 75, 78, 82,
@@ -260,7 +260,7 @@
1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
};
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const int16_t ac_qlookup_10[QINDEX_RANGE] = {
4, 9, 11, 13, 16, 18, 21, 24, 27, 30, 33, 37, 40,
44, 48, 51, 55, 59, 63, 67, 71, 75, 79, 83, 88, 92,
@@ -312,14 +312,14 @@
};
#endif
-int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
-#if CONFIG_VP9_HIGHBITDEPTH
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
- case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
- case VPX_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+ case AOM_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
+ case AOM_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+ case AOM_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1;
}
#else
@@ -328,14 +328,14 @@
#endif
}
-int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
-#if CONFIG_VP9_HIGHBITDEPTH
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
- case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
- case VPX_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+ case AOM_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
+ case AOM_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+ case AOM_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1;
}
#else
@@ -344,8 +344,8 @@
#endif
}
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
- int base_qindex) {
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
+ int base_qindex) {
if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
const int seg_qindex =
@@ -357,11 +357,11 @@
}
#if CONFIG_AOM_QM
-qm_val_t *aom_iqmatrix(VP10_COMMON *cm, int qmlevel, int is_chroma,
+qm_val_t *aom_iqmatrix(AV1_COMMON *cm, int qmlevel, int is_chroma,
int log2sizem2, int is_intra) {
return &cm->giqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
}
-qm_val_t *aom_qmatrix(VP10_COMMON *cm, int qmlevel, int is_chroma,
+qm_val_t *aom_qmatrix(AV1_COMMON *cm, int qmlevel, int is_chroma,
int log2sizem2, int is_intra) {
return &cm->gqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
}
@@ -371,7 +371,7 @@
static uint16_t
wt_matrix_ref[NUM_QM_LEVELS][2][2][4 * 4 + 8 * 8 + 16 * 16 + 32 * 32];
-void aom_qm_init(VP10_COMMON *cm) {
+void aom_qm_init(AV1_COMMON *cm) {
int q, c, f, t, size;
int current;
for (q = 0; q < NUM_QM_LEVELS; ++q) {
diff --git a/av1/common/quant_common.h b/av1/common/quant_common.h
index 6ceed49..d04103e 100644
--- a/av1/common/quant_common.h
+++ b/av1/common/quant_common.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_QUANT_COMMON_H_
-#define VP10_COMMON_QUANT_COMMON_H_
+#ifndef AV1_COMMON_QUANT_COMMON_H_
+#define AV1_COMMON_QUANT_COMMON_H_
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
#include "av1/common/seg_common.h"
#include "av1/common/enums.h"
@@ -34,25 +34,25 @@
#define DEFAULT_QM_LAST (NUM_QM_LEVELS - 1)
#endif
-struct VP10Common;
+struct AV1Common;
-int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
-int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
- int base_qindex);
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
+ int base_qindex);
#if CONFIG_AOM_QM
// Reduce the large number of quantizers to a smaller number of levels for which
// different matrices may be defined
static inline int aom_get_qmlevel(int qindex, int first, int last) {
int qmlevel = (qindex * (last + 1 - first) + QINDEX_RANGE / 2) / QINDEX_RANGE;
- qmlevel = VPXMIN(qmlevel + first, NUM_QM_LEVELS - 1);
+ qmlevel = AOMMIN(qmlevel + first, NUM_QM_LEVELS - 1);
return qmlevel;
}
-void aom_qm_init(struct VP10Common *cm);
-qm_val_t *aom_iqmatrix(struct VP10Common *cm, int qindex, int comp,
+void aom_qm_init(struct AV1Common *cm);
+qm_val_t *aom_iqmatrix(struct AV1Common *cm, int qindex, int comp,
int log2sizem2, int is_intra);
-qm_val_t *aom_qmatrix(struct VP10Common *cm, int qindex, int comp,
+qm_val_t *aom_qmatrix(struct AV1Common *cm, int qindex, int comp,
int log2sizem2, int is_intra);
#endif
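
Annotation: aom_get_qmlevel() above linearly maps the quantizer index onto a small set of matrix levels and clamps the result. A minimal standalone sketch of that mapping, assuming illustrative values for QINDEX_RANGE and NUM_QM_LEVELS (the real constants come from the codec headers):

/*
 * Standalone sketch of the qmlevel mapping; constants are illustrative.
 */
#include <stdio.h>

#define QINDEX_RANGE 256  /* assumed number of base quantizer indices */
#define NUM_QM_LEVELS 16  /* assumed number of distinct matrix levels */

static int get_qmlevel(int qindex, int first, int last) {
  /* Linearly rescale qindex onto [first, last], rounding to nearest,
   * then clamp to the highest defined level. */
  int qmlevel =
      (qindex * (last + 1 - first) + QINDEX_RANGE / 2) / QINDEX_RANGE;
  qmlevel += first;
  return qmlevel < NUM_QM_LEVELS - 1 ? qmlevel : NUM_QM_LEVELS - 1;
}

int main(void) {
  int q;
  for (q = 0; q < QINDEX_RANGE; q += 51)  /* a few sample points */
    printf("qindex %3d -> qmlevel %d\n", q, get_qmlevel(q, 2, 9));
  return 0;
}
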
@@ -64,13 +64,13 @@
typedef tran_low_t dequant_val_type_nuq[NUQ_KNOTS + 1];
typedef tran_low_t cuml_bins_type_nuq[NUQ_KNOTS];
-void vp10_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
- tran_low_t *cuml_bins, int dq_off_index);
-tran_low_t vp10_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq);
-tran_low_t vp10_dequant_coeff_nuq(int v, int q, const tran_low_t *dq);
+void av1_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
+ tran_low_t *cuml_bins, int dq_off_index);
+tran_low_t av1_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq);
+tran_low_t av1_dequant_coeff_nuq(int v, int q, const tran_low_t *dq);
static INLINE int get_dq_profile_from_ctx(int q_ctx) {
- return VPXMIN(q_ctx, QUANT_PROFILES - 1);
+ return AOMMIN(q_ctx, QUANT_PROFILES - 1);
}
#endif // CONFIG_NEW_QUANT
@@ -78,4 +78,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_QUANT_COMMON_H_
+#endif // AV1_COMMON_QUANT_COMMON_H_
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 0c3b93a..3db35e7 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -10,11 +10,11 @@
#include <assert.h>
-#include "./vpx_scale_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_scale_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/blend.h"
#include "av1/common/blockd.h"
@@ -242,9 +242,9 @@
return master;
}
-const uint8_t *vp10_get_soft_mask(int wedge_index, int wedge_sign,
- BLOCK_SIZE sb_type, int offset_x,
- int offset_y) {
+const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
+ BLOCK_SIZE sb_type, int offset_x,
+ int offset_y) {
const uint8_t *mask =
get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
if (mask) mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
@@ -326,13 +326,13 @@
if (wbits == 0) continue;
for (w = 0; w < wtypes; ++w) {
mask = get_wedge_mask_inplace(w, 0, bsize);
- vpx_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
+ aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
bh);
wedge_params->masks[0][w] = dst;
dst += bw * bh;
mask = get_wedge_mask_inplace(w, 1, bsize);
- vpx_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
+ aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
bh);
wedge_params->masks[1][w] = dst;
dst += bw * bh;
@@ -342,7 +342,7 @@
}
// Equation of line: f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8) = 0
-void vp10_init_wedge_masks() {
+void av1_init_wedge_masks() {
init_wedge_master_masks();
init_wedge_signs();
init_wedge_masks();
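
Annotation: the wedge masks initialized here come from the line equation in the comment above. A hedged sketch of how the sign of f(x, y) partitions a block into the two wedge regions; the library itself precomputes smoothed (anti-aliased) master masks and slides offsets into them, and the a[] coefficients are per-wedge parameters with illustrative values here:

#include <stdint.h>

/* Hard-threshold wedge mask from f(x, y) = a0*(x - a2*w/8) + a1*(y - a3*h/8). */
static void wedge_hard_mask(uint8_t *mask, int w, int h,
                            int a0, int a1, int a2, int a3) {
  int x, y;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      /* Evaluate 8*f(x, y) to stay in integer arithmetic. */
      const int f8 = a0 * (8 * x - a2 * w) + a1 * (8 * y - a3 * h);
      mask[y * w + x] = (f8 >= 0);  /* which side of the line */
    }
  }
}
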
@@ -355,13 +355,13 @@
BLOCK_SIZE sb_type, int wedge_offset_x, int wedge_offset_y, int h, int w) {
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type,
- wedge_offset_x, wedge_offset_y);
- vpx_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+ const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
+ wedge_offset_x, wedge_offset_y);
+ aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
mask, MASK_MASTER_STRIDE, h, w, subh, subw);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void build_masked_compound_wedge_extend_highbd(
uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
@@ -369,13 +369,13 @@
int bd) {
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type,
- wedge_offset_x, wedge_offset_y);
- vpx_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+ const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
+ wedge_offset_x, wedge_offset_y);
+ aom_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
src1_stride, mask, MASK_MASTER_STRIDE, h, w, subh,
subw, bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_SUPERTX
static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
@@ -388,13 +388,13 @@
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask =
- vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
- vpx_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+ av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+ aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
mask, 4 * num_4x4_blocks_wide_lookup[sb_type], h, w, subh,
subw);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void build_masked_compound_wedge_highbd(
uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
@@ -404,28 +404,28 @@
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask =
- vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
- vpx_highbd_blend_a64_mask(
+ av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+ aom_highbd_blend_a64_mask(
dst_8, dst_stride, src0_8, src0_stride, src1_8, src1_stride, mask,
4 * num_4x4_blocks_wide_lookup[sb_type], h, w, subh, subw, bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
- uint8_t *dst, int dst_stride,
- const int subpel_x, const int subpel_y,
- const struct scale_factors *sf, int w,
- int h,
+void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x, const int subpel_y,
+ const struct scale_factors *sf, int w,
+ int h,
#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
+ const INTERP_FILTER *interp_filter,
#else
- const INTERP_FILTER interp_filter,
+ const INTERP_FILTER interp_filter,
#endif
- int xs, int ys,
+ int xs, int ys,
#if CONFIG_SUPERTX
- int wedge_offset_x, int wedge_offset_y,
+ int wedge_offset_x, int wedge_offset_y,
#endif // CONFIG_SUPERTX
- const MACROBLOCKD *xd) {
+ const MACROBLOCKD *xd) {
const MODE_INFO *mi = xd->mi[0];
// The prediction filter types used here should be those for
// the second reference block.
@@ -436,13 +436,13 @@
#else
INTERP_FILTER tmp_ipf = interp_filter;
#endif // CONFIG_DUAL_FILTER
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_dst_[2 * MAX_SB_SQUARE]);
uint8_t *tmp_dst = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
? CONVERT_TO_BYTEPTR(tmp_dst_)
: tmp_dst_;
- vp10_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
- subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
+ av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
+ subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
#if CONFIG_SUPERTX
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_extend_highbd(
@@ -466,10 +466,10 @@
mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
-#else // CONFIG_VP9_HIGHBITDEPTH
+#else // CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_dst[MAX_SB_SQUARE]);
- vp10_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
- subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
+ av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
+ subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
#if CONFIG_SUPERTX
build_masked_compound_wedge_extend(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
@@ -481,12 +481,12 @@
mi->mbmi.interinter_wedge_sign, mi->mbmi.sb_type,
h, w);
#endif // CONFIG_SUPERTX
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
#endif // CONFIG_EXT_INTER
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
#if CONFIG_DUAL_FILTER
@@ -498,7 +498,7 @@
const int is_q4 = precision == MV_PRECISION_Q4;
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+ MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
const int subpel_x = mv.col & SUBPEL_MASK;
const int subpel_y = mv.row & SUBPEL_MASK;
@@ -508,22 +508,22 @@
sf, w, h, ref, interp_filter, sf->x_step_q4,
sf->y_step_q4, bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, const MV *src_mv,
- const struct scale_factors *sf, int w, int h,
- int ref,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, const MV *src_mv,
+ const struct scale_factors *sf, int w, int h,
+ int ref,
#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
+ const INTERP_FILTER *interp_filter,
#else
- const INTERP_FILTER interp_filter,
+ const INTERP_FILTER interp_filter,
#endif
- enum mv_precision precision, int x, int y) {
+ enum mv_precision precision, int x, int y) {
const int is_q4 = precision == MV_PRECISION_Q4;
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+ MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
const int subpel_x = mv.col & SUBPEL_MASK;
const int subpel_y = mv.row & SUBPEL_MASK;
@@ -600,7 +600,7 @@
uint8_t *pre;
MV32 scaled_mv;
int xs, ys, subpel_x, subpel_y;
- const int is_scaled = vp10_is_scaled(sf);
+ const int is_scaled = av1_is_scaled(sf);
x = x_base + idx * x_step;
y = y_base + idy * y_step;
@@ -610,7 +610,7 @@
if (is_scaled) {
pre =
pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
- scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
xs = sf->x_step_q4;
ys = sf->y_step_q4;
} else {
@@ -628,7 +628,7 @@
#if CONFIG_EXT_INTER
if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
mi->mbmi.use_wedge_interinter)
- vp10_make_masked_inter_predictor(
+ av1_make_masked_inter_predictor(
pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
sf, w, h, mi->mbmi.interp_filter, xs, ys,
#if CONFIG_SUPERTX
@@ -637,9 +637,9 @@
xd);
else
#endif // CONFIG_EXT_INTER
- vp10_make_inter_predictor(
- pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
- sf, x_step, y_step, ref, mi->mbmi.interp_filter, xs, ys, xd);
+ av1_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ subpel_x, subpel_y, sf, x_step, y_step,
+ ref, mi->mbmi.interp_filter, xs, ys, xd);
}
}
}
@@ -667,11 +667,11 @@
uint8_t *pre;
MV32 scaled_mv;
int xs, ys, subpel_x, subpel_y;
- const int is_scaled = vp10_is_scaled(sf);
+ const int is_scaled = av1_is_scaled(sf);
if (is_scaled) {
pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
- scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
xs = sf->x_step_q4;
ys = sf->y_step_q4;
} else {
@@ -689,36 +689,36 @@
#if CONFIG_EXT_INTER
if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
mi->mbmi.use_wedge_interinter)
- vp10_make_masked_inter_predictor(pre, pre_buf->stride, dst,
- dst_buf->stride, subpel_x, subpel_y, sf,
- w, h, mi->mbmi.interp_filter, xs, ys,
+ av1_make_masked_inter_predictor(pre, pre_buf->stride, dst,
+ dst_buf->stride, subpel_x, subpel_y, sf,
+ w, h, mi->mbmi.interp_filter, xs, ys,
#if CONFIG_SUPERTX
- wedge_offset_x, wedge_offset_y,
+ wedge_offset_x, wedge_offset_y,
#endif // CONFIG_SUPERTX
- xd);
+ xd);
else
#else // CONFIG_EXT_INTER
#if CONFIG_GLOBAL_MOTION
if (is_global[ref])
- vp10_warp_plane(&(gm[ref]->motion_params),
-#if CONFIG_VP9_HIGHBITDEPTH
- xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- pre_buf->buf0, pre_buf->width, pre_buf->height,
- pre_buf->stride, dst, (mi_x >> pd->subsampling_x) + x,
- (mi_y >> pd->subsampling_y) + y, w, h, dst_buf->stride,
- pd->subsampling_x, pd->subsampling_y, xs, ys);
+ av1_warp_plane(&(gm[ref]->motion_params),
+#if CONFIG_AOM_HIGHBITDEPTH
+ xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ pre_buf->buf0, pre_buf->width, pre_buf->height,
+ pre_buf->stride, dst, (mi_x >> pd->subsampling_x) + x,
+ (mi_y >> pd->subsampling_y) + y, w, h, dst_buf->stride,
+ pd->subsampling_x, pd->subsampling_y, xs, ys);
else
#endif // CONFIG_GLOBAL_MOTION
#endif // CONFIG_EXT_INTER
- vp10_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
- subpel_x, subpel_y, sf, w, h, ref,
- mi->mbmi.interp_filter, xs, ys, xd);
+ av1_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ subpel_x, subpel_y, sf, w, h, ref,
+ mi->mbmi.interp_filter, xs, ys, xd);
}
}
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
- int ir, int ic, int mi_row, int mi_col) {
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i, int ir,
+ int ic, int mi_row, int mi_col) {
struct macroblockd_plane *const pd = &xd->plane[plane];
MODE_INFO *const mi = xd->mi[0];
const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
@@ -732,27 +732,27 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
const uint8_t *pre =
&pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_build_inter_predictor(
+ av1_highbd_build_inter_predictor(
pre, pd->pre[ref].stride, dst, pd->dst.stride,
&mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
ref, mi->mbmi.interp_filter, MV_PRECISION_Q3,
mi_col * MI_SIZE + 4 * ic, mi_row * MI_SIZE + 4 * ir, xd->bd);
} else {
- vp10_build_inter_predictor(
+ av1_build_inter_predictor(
pre, pd->pre[ref].stride, dst, pd->dst.stride,
&mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
ref, mi->mbmi.interp_filter, MV_PRECISION_Q3,
mi_col * MI_SIZE + 4 * ic, mi_row * MI_SIZE + 4 * ir);
}
#else
- vp10_build_inter_predictor(
+ av1_build_inter_predictor(
pre, pd->pre[ref].stride, dst, pd->dst.stride,
&mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
ref, mi->mbmi.interp_filter, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
mi_row * MI_SIZE + 4 * ir);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -804,61 +804,61 @@
}
}
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
- vp10_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
- xd->plane[0].dst.stride, bsize);
+ av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, bsize);
#endif // CONFIG_EXT_INTER
}
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize, int plane) {
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int plane) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi)) {
if (plane == 0) {
- vp10_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
- xd->plane[0].dst.stride, bsize);
+ av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, bsize);
} else {
- vp10_build_interintra_predictors_sbc(xd, xd->plane[plane].dst.buf,
- xd->plane[plane].dst.stride, plane,
- bsize);
+ av1_build_interintra_predictors_sbc(xd, xd->plane[plane].dst.buf,
+ xd->plane[plane].dst.stride, plane,
+ bsize);
}
}
#endif // CONFIG_EXT_INTER
}
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
MAX_MB_PLANE - 1);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
- vp10_build_interintra_predictors_sbuv(
+ av1_build_interintra_predictors_sbuv(
xd, xd->plane[1].dst.buf, xd->plane[2].dst.buf, xd->plane[1].dst.stride,
xd->plane[2].dst.stride, bsize);
#endif // CONFIG_EXT_INTER
}
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
MAX_MB_PLANE - 1);
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
- vp10_build_interintra_predictors(
+ av1_build_interintra_predictors(
xd, xd->plane[0].dst.buf, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
xd->plane[0].dst.stride, xd->plane[1].dst.stride,
xd->plane[2].dst.stride, bsize);
#endif // CONFIG_EXT_INTER
}
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src, int mi_row,
- int mi_col) {
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src, int mi_row,
+ int mi_col) {
uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
src->v_buffer };
const int widths[MAX_MB_PLANE] = { src->y_crop_width, src->uv_crop_width,
@@ -877,9 +877,9 @@
}
}
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
- const YV12_BUFFER_CONFIG *src, int mi_row,
- int mi_col, const struct scale_factors *sf) {
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
+ const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+ const struct scale_factors *sf) {
if (src != NULL) {
int i;
uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
@@ -929,7 +929,7 @@
return NULL;
}
-void vp10_build_masked_inter_predictor_complex(
+void av1_build_masked_inter_predictor_complex(
MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre,
int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition,
@@ -946,9 +946,9 @@
int w_remain, h_remain;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int is_hdb = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
assert(bsize <= BLOCK_32X32);
assert(IMPLIES(plane == 0, ssx == 0));
@@ -963,13 +963,13 @@
dst += h_offset * dst_stride;
pre += h_offset * pre_stride;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (is_hdb)
- vpx_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre,
+ aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre,
pre_stride, mask, h, top_w, xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
mask, h, top_w);
dst += h * dst_stride;
@@ -984,13 +984,13 @@
dst += w_offset;
pre += w_offset;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (is_hdb)
- vpx_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre,
+ aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre,
pre_stride, mask, top_h, w, xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
mask, top_h, w);
dst += w;
@@ -1007,7 +1007,7 @@
return;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (is_hdb) {
dst = (uint8_t *)CONVERT_TO_SHORTPTR(dst);
pre = (const uint8_t *)CONVERT_TO_SHORTPTR(pre);
@@ -1015,7 +1015,7 @@
pre_stride *= 2;
w_remain *= 2;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
do {
memcpy(dst, pre, w_remain * sizeof(uint8_t));
@@ -1024,13 +1024,12 @@
} while (--h_remain);
}
-void vp10_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
#if CONFIG_EXT_INTER
- int mi_row_ori,
- int mi_col_ori,
+ int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
- int mi_row, int mi_col,
- BLOCK_SIZE bsize, int block) {
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int block) {
  // Prediction function used in supertx:
  // Use the mv at the current block (which is smaller than 8x8)
  // to get the prediction of a block located at (mi_row, mi_col) of size bsize
@@ -1068,19 +1067,19 @@
}
#if CONFIG_EXT_INTER
if (is_interintra_pred(&xd->mi[0]->mbmi))
- vp10_build_interintra_predictors(
+ av1_build_interintra_predictors(
xd, xd->plane[0].dst.buf, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
xd->plane[0].dst.stride, xd->plane[1].dst.stride,
xd->plane[2].dst.stride, bsize);
#endif // CONFIG_EXT_INTER
}
-void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
#if CONFIG_EXT_INTER
- int mi_row_ori, int mi_col_ori,
+ int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
- int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
int plane;
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
@@ -1152,7 +1151,7 @@
};
#endif // CONFIG_EXT_PARTITION
-const uint8_t *vp10_get_obmc_mask(int length) {
+const uint8_t *av1_get_obmc_mask(int length) {
switch (length) {
case 1: return obmc_mask_1;
case 2: return obmc_mask_2;
@@ -1171,22 +1170,22 @@
// top/left neighboring blocks' inter predictors with the regular inter
// prediction. We assume the original prediction (bmc) is stored in
// xd->plane[].dst.buf
-void vp10_build_obmc_inter_prediction(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col,
- uint8_t *above[MAX_MB_PLANE],
- int above_stride[MAX_MB_PLANE],
- uint8_t *left[MAX_MB_PLANE],
- int left_stride[MAX_MB_PLANE]) {
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ uint8_t *above[MAX_MB_PLANE],
+ int above_stride[MAX_MB_PLANE],
+ uint8_t *left[MAX_MB_PLANE],
+ int left_stride[MAX_MB_PLANE]) {
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int plane, i;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// handle above row
if (xd->up_available) {
const int overlap = num_4x4_blocks_high_lookup[bsize] * 2;
- const int miw = VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
+ const int miw = AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
const int mi_row_offset = -1;
assert(miw > 0);
@@ -1197,7 +1196,7 @@
const MB_MODE_INFO *const above_mbmi =
&xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
const int mi_step =
- VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+ AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
if (is_neighbor_overlappable(above_mbmi)) {
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -1209,15 +1208,15 @@
const int tmp_stride = above_stride[plane];
const uint8_t *const tmp =
&above[plane][(i * MI_SIZE) >> pd->subsampling_x];
- const uint8_t *const mask = vp10_get_obmc_mask(bh);
+ const uint8_t *const mask = av1_get_obmc_mask(bh);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (is_hbd)
- vpx_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
+ aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
tmp_stride, mask, bh, bw, xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
tmp_stride, mask, bh, bw);
}
}
@@ -1228,7 +1227,7 @@
// handle left column
if (xd->left_available) {
const int overlap = num_4x4_blocks_wide_lookup[bsize] * 2;
- const int mih = VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
+ const int mih = AOMMIN(xd->n8_h, cm->mi_rows - mi_row);
const int mi_col_offset = -1;
assert(mih > 0);
@@ -1239,7 +1238,7 @@
const MB_MODE_INFO *const left_mbmi =
&xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
const int mi_step =
- VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+ AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
if (is_neighbor_overlappable(left_mbmi)) {
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -1252,15 +1251,15 @@
const int tmp_stride = left_stride[plane];
const uint8_t *const tmp =
&left[plane][(i * MI_SIZE * tmp_stride) >> pd->subsampling_y];
- const uint8_t *const mask = vp10_get_obmc_mask(bw);
+ const uint8_t *const mask = av1_get_obmc_mask(bw);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (is_hbd)
- vpx_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
+ aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
tmp_stride, mask, bh, bw, xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
tmp_stride, mask, bh, bw);
}
}
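
Annotation: av1_build_obmc_inter_prediction() above blends the existing prediction with re-predicted pixels from the top and left neighbors, weighted by the 1-D masks from av1_get_obmc_mask(). A minimal scalar sketch of one blended column, assuming the AOM_BLEND_A64 convention of a 6-bit alpha with round-to-nearest; matching the argument order of the calls above, mask[i] weights the prediction already in dst and the remainder weights the neighbor's predictor tmp:

#include <stdint.h>

static void obmc_blend_column(uint8_t *dst, const uint8_t *tmp,
                              const uint8_t *mask, int rows) {
  int i;
  for (i = 0; i < rows; ++i)
    dst[i] = (uint8_t)((mask[i] * dst[i] + (64 - mask[i]) * tmp[i] + 32) >> 6);
}
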
@@ -1282,19 +1281,19 @@
}
#endif // CONFIG_EXT_INTER
-void vp10_build_prediction_by_above_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col,
- uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_width[MAX_MB_PLANE],
- int tmp_height[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]) {
+void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ uint8_t *tmp_buf[MAX_MB_PLANE],
+ int tmp_width[MAX_MB_PLANE],
+ int tmp_height[MAX_MB_PLANE],
+ int tmp_stride[MAX_MB_PLANE]) {
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int i, j, mi_step, ref;
if (mi_row <= tile->mi_row_start) return;
- for (i = 0; i < VPXMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
+ for (i = 0; i < AOMMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
int mi_row_offset = -1;
int mi_col_offset = i;
int mi_x, mi_y, bw, bh;
@@ -1304,7 +1303,7 @@
MB_MODE_INFO backup_mbmi;
#endif // CONFIG_EXT_INTER
- mi_step = VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+ mi_step = AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
if (!is_neighbor_overlappable(above_mbmi)) continue;
@@ -1324,11 +1323,11 @@
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
- if ((!vp10_is_valid_scale(&ref_buf->sf)))
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ if ((!av1_is_valid_scale(&ref_buf->sf)))
+ aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
- vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
- &ref_buf->sf);
+ av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
+ &ref_buf->sf);
}
xd->mb_to_left_edge = -(((mi_col + i) * MI_SIZE) * 8);
@@ -1338,7 +1337,7 @@
for (j = 0; j < MAX_MB_PLANE; ++j) {
const struct macroblockd_plane *pd = &xd->plane[j];
bw = (mi_step * 8) >> pd->subsampling_x;
- bh = VPXMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
+ bh = AOMMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
4);
if (above_mbmi->sb_type < BLOCK_8X8) {
@@ -1379,19 +1378,19 @@
xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
}
-void vp10_build_prediction_by_left_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col,
- uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_width[MAX_MB_PLANE],
- int tmp_height[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]) {
+void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ uint8_t *tmp_buf[MAX_MB_PLANE],
+ int tmp_width[MAX_MB_PLANE],
+ int tmp_height[MAX_MB_PLANE],
+ int tmp_stride[MAX_MB_PLANE]) {
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int i, j, mi_step, ref;
if (mi_col == 0 || (mi_col - 1 < tile->mi_col_start)) return;
- for (i = 0; i < VPXMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
+ for (i = 0; i < AOMMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
int mi_row_offset = i;
int mi_col_offset = -1;
int mi_x, mi_y, bw, bh;
@@ -1401,7 +1400,7 @@
MB_MODE_INFO backup_mbmi;
#endif // CONFIG_EXT_INTER
- mi_step = VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+ mi_step = AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
if (!is_neighbor_overlappable(left_mbmi)) continue;
@@ -1421,11 +1420,11 @@
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
- if ((!vp10_is_valid_scale(&ref_buf->sf)))
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ if ((!av1_is_valid_scale(&ref_buf->sf)))
+ aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
- vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i, mi_col,
- &ref_buf->sf);
+ av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i, mi_col,
+ &ref_buf->sf);
}
xd->mb_to_top_edge = -(((mi_row + i) * MI_SIZE) * 8);
@@ -1434,7 +1433,7 @@
for (j = 0; j < MAX_MB_PLANE; ++j) {
const struct macroblockd_plane *pd = &xd->plane[j];
- bw = VPXMAX((num_4x4_blocks_wide_lookup[bsize] * 2) >> pd->subsampling_x,
+ bw = AOMMAX((num_4x4_blocks_wide_lookup[bsize] * 2) >> pd->subsampling_x,
4);
bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y;
@@ -1515,10 +1514,10 @@
if (use_wedge_interintra) {
if (is_interintra_wedge_used(bsize)) {
const uint8_t *mask =
- vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+ av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
- vpx_blend_a64_mask(
+ aom_blend_a64_mask(
comppred, compstride, intrapred, intrastride, interpred, interstride,
mask, 4 * num_4x4_blocks_wide_lookup[bsize], bh, bw, subh, subw);
}
@@ -1531,7 +1530,7 @@
for (j = 0; j < bw; ++j) {
int scale = ii_weights1d[i * size_scale];
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1542,7 +1541,7 @@
for (j = 0; j < bw; ++j) {
int scale = ii_weights1d[j * size_scale];
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1556,7 +1555,7 @@
ii_weights1d[j * size_scale]) >>
2;
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1570,7 +1569,7 @@
ii_weights1d[i * size_scale]) >>
2;
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1581,7 +1580,7 @@
for (j = 0; j < bw; ++j) {
int scale = ii_weights1d[(i < j ? i : j) * size_scale];
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1594,7 +1593,7 @@
(ii_weights1d[i * size_scale] + ii_weights1d[j * size_scale]) >>
1;
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1605,7 +1604,7 @@
default:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- comppred[i * compstride + j] = VPX_BLEND_AVG(
+ comppred[i * compstride + j] = AOM_BLEND_AVG(
intrapred[i * intrastride + j], interpred[i * interstride + j]);
}
}
@@ -1613,7 +1612,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void combine_interintra_highbd(
INTERINTRA_MODE mode, int use_wedge_interintra, int wedge_index,
int wedge_sign, BLOCK_SIZE bsize, BLOCK_SIZE plane_bsize,
@@ -1631,10 +1630,10 @@
if (use_wedge_interintra) {
if (is_interintra_wedge_used(bsize)) {
const uint8_t *mask =
- vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+ av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
- vpx_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
+ aom_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
interpred8, interstride, mask, bw, bh, bw, subh,
subw, bd);
}
@@ -1647,7 +1646,7 @@
for (j = 0; j < bw; ++j) {
int scale = ii_weights1d[i * size_scale];
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1658,7 +1657,7 @@
for (j = 0; j < bw; ++j) {
int scale = ii_weights1d[j * size_scale];
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1672,7 +1671,7 @@
ii_weights1d[j * size_scale]) >>
2;
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1686,7 +1685,7 @@
ii_weights1d[i * size_scale]) >>
2;
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1697,7 +1696,7 @@
for (j = 0; j < bw; ++j) {
int scale = ii_weights1d[(i < j ? i : j) * size_scale];
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1710,7 +1709,7 @@
(ii_weights1d[i * size_scale] + ii_weights1d[j * size_scale]) >>
1;
comppred[i * compstride + j] =
- VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+ AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
interpred[i * interstride + j]);
}
}
@@ -1721,14 +1720,14 @@
default:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- comppred[i * compstride + j] = VPX_BLEND_AVG(
+ comppred[i * compstride + j] = AOM_BLEND_AVG(
interpred[i * interstride + j], intrapred[i * intrastride + j]);
}
}
break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
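
Annotation: the combine_interintra paths above mix the intra and inter predictors per pixel through AOM_BLEND_A256, falling back to a plain average in the default case. A scalar sketch under assumed macro semantics — AOM_BLEND_A256(a, v0, v1) == (a*v0 + (256 - a)*v1 + 128) >> 8 and AOM_BLEND_AVG(v0, v1) == (v0 + v1 + 1) >> 1:

#include <stdint.h>

static uint8_t blend_a256(int scale, uint8_t intra, uint8_t inter) {
  return (uint8_t)((scale * intra + (256 - scale) * inter + 128) >> 8);
}

/* One row of the variant that indexes the 1-D weight table by column
 * (cf. the ii_weights1d[j * size_scale] cases above): intra dominates
 * near the predicted edge and inter takes over further away. */
static void combine_interintra_row(uint8_t *comp, const uint8_t *intra,
                                   const uint8_t *inter, const uint8_t *w1d,
                                   int bw) {
  int j;
  for (j = 0; j < bw; ++j) comp[j] = blend_a256(w1d[j], intra[j], inter[j]);
}
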
// Break down rectangular intra prediction for joint spatio-temporal prediction
// into two square intra predictions.
@@ -1745,47 +1744,47 @@
TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
if (bwl == bhl) {
- vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
- dst, dst_stride, 0, 0, plane);
+ av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+ dst, dst_stride, 0, 0, plane);
} else if (bwl < bhl) {
uint8_t *src_2 = ref + pxbw * ref_stride;
uint8_t *dst_2 = dst + pxbw * dst_stride;
- vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
- dst, dst_stride, 0, 0, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+ av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+ dst, dst_stride, 0, 0, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
memcpy(src_216 - ref_stride, dst_216 - dst_stride,
sizeof(*src_216) * pxbw);
} else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
memcpy(src_2 - ref_stride, dst_2 - dst_stride, sizeof(*src_2) * pxbw);
}
- vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
- dst_2, dst_stride, 0, 1 << bwl, plane);
+ av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
+ dst_2, dst_stride, 0, 1 << bwl, plane);
} else { // bwl > bhl
int i;
uint8_t *src_2 = ref + pxbh;
uint8_t *dst_2 = dst + pxbh;
- vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
- dst, dst_stride, 0, 0, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+ av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+ dst, dst_stride, 0, 0, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
for (i = 0; i < pxbh; ++i)
src_216[i * ref_stride - 1] = dst_216[i * dst_stride - 1];
} else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
for (i = 0; i < pxbh; ++i)
src_2[i * ref_stride - 1] = dst_2[i * dst_stride - 1];
}
- vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
- dst_2, dst_stride, 1 << bhl, 0, plane);
+ av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
+ dst_2, dst_stride, 1 << bhl, 0, plane);
}
}
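
Annotation: the function above splits a rectangular interintra target into two square intra predictions, where the second square reuses the first square's freshly predicted boundary samples as its reference. A hedged sketch of the tall-block (bwl < bhl) 8-bit path; predict_square() is a hypothetical stand-in for av1_predict_intra_block(), and only the boundary hand-off is illustrated:

#include <stdint.h>
#include <string.h>

void predict_square(const uint8_t *ref, int ref_stride, uint8_t *dst,
                    int dst_stride);  /* hypothetical square predictor */

static void predict_tall_block(uint8_t *ref, int ref_stride, uint8_t *dst,
                               int dst_stride, int pxbw /* square side */) {
  uint8_t *ref_2 = ref + pxbw * ref_stride;  /* second square's source */
  uint8_t *dst_2 = dst + pxbw * dst_stride;  /* second square's output */

  predict_square(ref, ref_stride, dst, dst_stride);  /* top square */

  /* Feed the bottom row just predicted for the top square to the bottom
   * square as its above-row reference, then predict it. */
  memcpy(ref_2 - ref_stride, dst_2 - dst_stride, pxbw);
  predict_square(ref_2, ref_stride, dst_2, dst_stride);
}
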
@@ -1795,20 +1794,20 @@
D117_PRED, D153_PRED, D207_PRED, D63_PRED, TM_PRED
};
-void vp10_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
- BLOCK_SIZE bsize, int plane,
- uint8_t *dst, int dst_stride) {
+void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
+ BLOCK_SIZE bsize, int plane,
+ uint8_t *dst, int dst_stride) {
build_intra_predictors_for_interintra(
xd, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride, dst,
dst_stride, interintra_to_intra_mode[xd->mi[0]->mbmi.interintra_mode],
bsize, plane);
}
-void vp10_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
- const uint8_t *inter_pred, int inter_stride,
- const uint8_t *intra_pred, int intra_stride) {
+void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
+ const uint8_t *inter_pred, int inter_stride,
+ const uint8_t *intra_pred, int intra_stride) {
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
combine_interintra_highbd(
xd->mi[0]->mbmi.interintra_mode, xd->mi[0]->mbmi.use_wedge_interintra,
@@ -1818,7 +1817,7 @@
inter_stride, intra_pred, intra_stride, xd->bd);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
combine_interintra(xd->mi[0]->mbmi.interintra_mode,
xd->mi[0]->mbmi.use_wedge_interintra,
xd->mi[0]->mbmi.interintra_wedge_index,
@@ -1827,63 +1826,63 @@
inter_pred, inter_stride, intra_pred, intra_stride);
}
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
- int ystride, BLOCK_SIZE bsize) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+ int ystride, BLOCK_SIZE bsize) {
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, intrapredictor[MAX_SB_SQUARE]);
- vp10_build_intra_predictors_for_interintra(
+ av1_build_intra_predictors_for_interintra(
xd, bsize, 0, CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
- vp10_combine_interintra(xd, bsize, 0, ypred, ystride,
- CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
+ av1_combine_interintra(xd, bsize, 0, ypred, ystride,
+ CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
DECLARE_ALIGNED(16, uint8_t, intrapredictor[MAX_SB_SQUARE]);
- vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapredictor,
- MAX_SB_SIZE);
- vp10_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
- MAX_SB_SIZE);
+ av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapredictor,
+ MAX_SB_SIZE);
+ av1_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
+ MAX_SB_SIZE);
}
}
-void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
- int ustride, int plane,
- BLOCK_SIZE bsize) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
+ int ustride, int plane,
+ BLOCK_SIZE bsize) {
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, uintrapredictor[MAX_SB_SQUARE]);
- vp10_build_intra_predictors_for_interintra(
+ av1_build_intra_predictors_for_interintra(
xd, bsize, plane, CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
- vp10_combine_interintra(xd, bsize, plane, upred, ustride,
- CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
+ av1_combine_interintra(xd, bsize, plane, upred, ustride,
+ CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
DECLARE_ALIGNED(16, uint8_t, uintrapredictor[MAX_SB_SQUARE]);
- vp10_build_intra_predictors_for_interintra(xd, bsize, plane,
- uintrapredictor, MAX_SB_SIZE);
- vp10_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
- MAX_SB_SIZE);
+ av1_build_intra_predictors_for_interintra(xd, bsize, plane, uintrapredictor,
+ MAX_SB_SIZE);
+ av1_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
+ MAX_SB_SIZE);
}
}
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
- uint8_t *vpred, int ustride,
- int vstride, BLOCK_SIZE bsize) {
- vp10_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
- vp10_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+ uint8_t *vpred, int ustride,
+ int vstride, BLOCK_SIZE bsize) {
+ av1_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
+ av1_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
}
-void vp10_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
- uint8_t *upred, uint8_t *vpred,
- int ystride, int ustride, int vstride,
- BLOCK_SIZE bsize) {
- vp10_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
- vp10_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride,
- bsize);
+void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
+ uint8_t *upred, uint8_t *vpred,
+ int ystride, int ustride, int vstride,
+ BLOCK_SIZE bsize) {
+ av1_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
+ av1_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride,
+ bsize);
}
// Builds the inter-predictor for the single ref case
@@ -1899,7 +1898,7 @@
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
struct buf_2d *const pre_buf = &pd->pre[ref];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint8_t *const dst =
(xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH ? CONVERT_TO_BYTEPTR(ext_dst)
: ext_dst) +
@@ -1922,11 +1921,11 @@
uint8_t *pre;
MV32 scaled_mv;
int xs, ys, subpel_x, subpel_y;
- const int is_scaled = vp10_is_scaled(sf);
+ const int is_scaled = av1_is_scaled(sf);
if (is_scaled) {
pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
- scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
xs = sf->x_step_q4;
ys = sf->y_step_q4;
} else {
@@ -1941,12 +1940,12 @@
pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
(scaled_mv.col >> SUBPEL_BITS);
- vp10_make_inter_predictor(pre, pre_buf->stride, dst, ext_dst_stride, subpel_x,
- subpel_y, sf, w, h, 0, mi->mbmi.interp_filter, xs,
- ys, xd);
+ av1_make_inter_predictor(pre, pre_buf->stride, dst, ext_dst_stride, subpel_x,
+ subpel_y, sf, w, h, 0, mi->mbmi.interp_filter, xs,
+ ys, xd);
}
-void vp10_build_inter_predictors_for_planes_single_buf(
+void av1_build_inter_predictors_for_planes_single_buf(
MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]) {
int plane;
@@ -1987,7 +1986,7 @@
if (is_compound && is_interinter_wedge_used(mbmi->sb_type) &&
mbmi->use_wedge_interinter) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_highbd(
dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
@@ -1995,28 +1994,30 @@
mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
mbmi->sb_type, h, w, xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
build_masked_compound_wedge(
dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
ext_dst_stride1, mbmi->interinter_wedge_index,
mbmi->interinter_wedge_sign, mbmi->sb_type, h, w);
} else {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- vpx_highbd_convolve_copy(CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
+ aom_highbd_convolve_copy(CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
dst, dst_buf->stride, NULL, 0, NULL, 0, w, h,
xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_convolve_copy(ext_dst0, ext_dst_stride0, dst, dst_buf->stride, NULL,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_convolve_copy(ext_dst0, ext_dst_stride0, dst, dst_buf->stride, NULL,
0, NULL, 0, w, h);
}
}
-void vp10_build_wedge_inter_predictor_from_buf(
- MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
- uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
- int ext_dst_stride1[3]) {
+void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ int plane_from, int plane_to,
+ uint8_t *ext_dst0[3],
+ int ext_dst_stride0[3],
+ uint8_t *ext_dst1[3],
+ int ext_dst_stride1[3]) {
int plane;
for (plane = plane_from; plane <= plane_to; ++plane) {
const BLOCK_SIZE plane_bsize =
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 092926d..4182d9f 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_RECONINTER_H_
-#define VP10_COMMON_RECONINTER_H_
+#ifndef AV1_COMMON_RECONINTER_H_
+#define AV1_COMMON_RECONINTER_H_
#include "av1/common/filter.h"
#include "av1/common/onyxc_int.h"
-#include "av1/common/vp10_convolve.h"
-#include "aom/vpx_integer.h"
+#include "av1/common/av1_convolve.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -33,27 +33,27 @@
int xs, int ys) {
#if CONFIG_DUAL_FILTER
InterpFilterParams interp_filter_params_x =
- vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
InterpFilterParams interp_filter_params_y =
- vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+ av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
#else
InterpFilterParams interp_filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
#endif
#if CONFIG_DUAL_FILTER
if (interp_filter_params_x.taps == SUBPEL_TAPS &&
interp_filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
const int16_t *kernel_x =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
const int16_t *kernel_y =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
#else
if (interp_filter_params.taps == SUBPEL_TAPS) {
const int16_t *kernel_x =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
const int16_t *kernel_y =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
#endif
#if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
if (IsInterpolatingFilter(interp_filter)) {
@@ -72,12 +72,12 @@
    // ref_idx > 0 means this is the second reference frame;
    // the first reference frame's prediction result is already in dst,
    // so we need to average the first and second results
- vp10_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
- subpel_x, xs, subpel_y, ys, ref_idx);
+ av1_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
+ subpel_x, xs, subpel_y, ys, ref_idx);
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int subpel_x,
@@ -92,27 +92,27 @@
int xs, int ys, int bd) {
#if CONFIG_DUAL_FILTER
InterpFilterParams interp_filter_params_x =
- vp10_get_interp_filter_params(interp_filter[1 + 2 * ref]);
+ av1_get_interp_filter_params(interp_filter[1 + 2 * ref]);
InterpFilterParams interp_filter_params_y =
- vp10_get_interp_filter_params(interp_filter[0 + 2 * ref]);
+ av1_get_interp_filter_params(interp_filter[0 + 2 * ref]);
#else
InterpFilterParams interp_filter_params =
- vp10_get_interp_filter_params(interp_filter);
+ av1_get_interp_filter_params(interp_filter);
#endif
#if CONFIG_DUAL_FILTER
if (interp_filter_params_x.taps == SUBPEL_TAPS &&
interp_filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
const int16_t *kernel_x =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
const int16_t *kernel_y =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
#else
if (interp_filter_params.taps == SUBPEL_TAPS) {
const int16_t *kernel_x =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
const int16_t *kernel_y =
- vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
+ av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
#endif // CONFIG_DUAL_FILTER
#if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
if (IsInterpolatingFilter(interp_filter)) {
@@ -134,11 +134,11 @@
    // the first reference frame's prediction result is already in dst,
    // so we need to average the first and second results
int avg = ref > 0;
- vp10_highbd_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
- subpel_x, xs, subpel_y, ys, avg, bd);
+ av1_highbd_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
+ subpel_x, xs, subpel_y, ys, avg, bd);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EXT_INTER
// Set to one to use larger codebooks
@@ -223,7 +223,7 @@
#endif // CONFIG_SUPERTX && CONFIG_EXT_INTER
int mi_x, int mi_y);
-static INLINE void vp10_make_inter_predictor(
+static INLINE void av1_make_inter_predictor(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
const int subpel_x, const int subpel_y, const struct scale_factors *sf,
int w, int h, int ref,
@@ -234,32 +234,32 @@
#endif
int xs, int ys, const MACROBLOCKD *xd) {
(void)xd;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
highbd_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
sf, w, h, ref, interp_filter, xs, ys, xd->bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf, w,
h, ref, interp_filter, xs, ys);
}
#if CONFIG_EXT_INTER
-void vp10_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
- uint8_t *dst, int dst_stride,
- const int subpel_x, const int subpel_y,
- const struct scale_factors *sf, int w,
- int h,
+void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x, const int subpel_y,
+ const struct scale_factors *sf, int w,
+ int h,
#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
+ const INTERP_FILTER *interp_filter,
#else
- const INTERP_FILTER interp_filter,
+ const INTERP_FILTER interp_filter,
#endif
- int xs, int ys,
+ int xs, int ys,
#if CONFIG_SUPERTX
- int wedge_offset_x, int wedge_offset_y,
+ int wedge_offset_x, int wedge_offset_y,
#endif // CONFIG_SUPERTX
- const MACROBLOCKD *xd);
+ const MACROBLOCKD *xd);
#endif // CONFIG_EXT_INTER
static INLINE int round_mv_comp_q4(int value) {
@@ -297,9 +297,9 @@
// If the MV points so far into the UMV border that no visible pixels
// are used for reconstruction, the subpel part of the MV can be
// discarded and the MV limited to 16 pixels with equivalent results.
- const int spel_left = (VPX_INTERP_EXTEND + bw) << SUBPEL_BITS;
+ const int spel_left = (AOM_INTERP_EXTEND + bw) << SUBPEL_BITS;
const int spel_right = spel_left - SUBPEL_SHIFTS;
- const int spel_top = (VPX_INTERP_EXTEND + bh) << SUBPEL_BITS;
+ const int spel_top = (AOM_INTERP_EXTEND + bh) << SUBPEL_BITS;
const int spel_bottom = spel_top - SUBPEL_SHIFTS;
MV clamped_mv = { src_mv->row * (1 << (1 - ss_y)),
src_mv->col * (1 << (1 - ss_x)) };
@@ -328,57 +328,56 @@
return res;
}
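
Annotation: per the comment above, an MV that points entirely into the extended (UMV) border can have its subpel part dropped and be limited with equivalent results. A sketch of the clamp bounds for one MV component; the constants are assumptions for illustration — AOM_INTERP_EXTEND = 4 (border pixels added for the interpolation taps), SUBPEL_BITS = 4 and SUBPEL_SHIFTS = 16 (a 1/16-pel interpolation grid):

#define AOM_INTERP_EXTEND 4
#define SUBPEL_BITS 4
#define SUBPEL_SHIFTS 16

static int clamp_val(int v, int lo, int hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

/* Clamp one subpel MV component for a block `len` pixels wide/high so the
 * prediction still overlaps at least one visible pixel. edge_lo/edge_hi
 * are the distances to the frame edges in the same subpel units. */
static int clamp_mv_component(int mv, int len, int edge_lo, int edge_hi) {
  const int spel_before = (AOM_INTERP_EXTEND + len) << SUBPEL_BITS;
  const int spel_after = spel_before - SUBPEL_SHIFTS;
  return clamp_val(mv, edge_lo - spel_before, edge_hi + spel_after);
}
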
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
- int ir, int ic, int mi_row, int mi_col);
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i, int ir,
+ int ic, int mi_row, int mi_col);
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize);
-
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize, int plane);
-
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize);
-
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
-#if CONFIG_SUPERTX
-void vp10_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
-#if CONFIG_EXT_INTER
- int mi_row_ori,
- int mi_col_ori,
-#endif // CONFIG_EXT_INTER
- int mi_row, int mi_col,
- BLOCK_SIZE bsize, int block);
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int plane);
-void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
+
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
+
+#if CONFIG_SUPERTX
+void av1_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
#if CONFIG_EXT_INTER
- int mi_row_ori, int mi_col_ori,
+ int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
- int mi_row, int mi_col,
- BLOCK_SIZE bsize);
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int block);
+
+void av1_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+#if CONFIG_EXT_INTER
+ int mi_row_ori, int mi_col_ori,
+#endif // CONFIG_EXT_INTER
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
struct macroblockd_plane;
-void vp10_build_masked_inter_predictor_complex(
+void av1_build_masked_inter_predictor_complex(
MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre,
int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition,
int plane);
#endif // CONFIG_SUPERTX
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride, const MV *mv_q3,
- const struct scale_factors *sf, int w, int h,
- int do_avg,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, const MV *mv_q3,
+ const struct scale_factors *sf, int w, int h,
+ int do_avg,
#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
+ const INTERP_FILTER *interp_filter,
#else
- const INTERP_FILTER interp_filter,
+ const INTERP_FILTER interp_filter,
#endif
- enum mv_precision precision, int x, int y);
+ enum mv_precision precision, int x, int y);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
#if CONFIG_DUAL_FILTER
@@ -410,13 +409,13 @@
dst->stride = stride;
}
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src, int mi_row,
- int mi_col);
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src, int mi_row,
+ int mi_col);
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
- const YV12_BUFFER_CONFIG *src, int mi_row,
- int mi_col, const struct scale_factors *sf);
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
+ const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+ const struct scale_factors *sf);
#if CONFIG_DUAL_FILTER
// Detect if the block has sub-pixel level motion vectors
@@ -463,7 +462,7 @@
#endif
#if CONFIG_EXT_INTERP
-static INLINE int vp10_is_interp_needed(const MACROBLOCKD *const xd) {
+static INLINE int av1_is_interp_needed(const MACROBLOCKD *const xd) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -483,8 +482,8 @@
#endif
// For scaled references, the interpolation filter is always signaled.
- if (vp10_is_scaled(&xd->block_refs[0]->sf)) return 1;
- if (is_compound && vp10_is_scaled(&xd->block_refs[1]->sf)) return 1;
+ if (av1_is_scaled(&xd->block_refs[0]->sf)) return 1;
+ if (is_compound && av1_is_scaled(&xd->block_refs[1]->sf)) return 1;
if (bsize < BLOCK_8X8) {
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -516,81 +515,83 @@
#endif // CONFIG_EXT_INTERP
#if CONFIG_OBMC
-const uint8_t *vp10_get_obmc_mask(int length);
-void vp10_build_obmc_inter_prediction(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col,
- uint8_t *above[MAX_MB_PLANE],
- int above_stride[MAX_MB_PLANE],
- uint8_t *left[MAX_MB_PLANE],
- int left_stride[MAX_MB_PLANE]);
-void vp10_build_prediction_by_above_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col,
- uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_width[MAX_MB_PLANE],
- int tmp_height[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]);
-void vp10_build_prediction_by_left_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
+const uint8_t *av1_get_obmc_mask(int length);
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ uint8_t *above[MAX_MB_PLANE],
+ int above_stride[MAX_MB_PLANE],
+ uint8_t *left[MAX_MB_PLANE],
+ int left_stride[MAX_MB_PLANE]);
+void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
int tmp_height[MAX_MB_PLANE],
int tmp_stride[MAX_MB_PLANE]);
+void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ uint8_t *tmp_buf[MAX_MB_PLANE],
+ int tmp_width[MAX_MB_PLANE],
+ int tmp_height[MAX_MB_PLANE],
+ int tmp_stride[MAX_MB_PLANE]);
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
#define MASK_MASTER_SIZE (2 * MAX_SB_SIZE)
#define MASK_MASTER_STRIDE (2 * MAX_SB_SIZE)
-void vp10_init_wedge_masks();
+void av1_init_wedge_masks();
-static INLINE const uint8_t *vp10_get_contiguous_soft_mask(int wedge_index,
- int wedge_sign,
- BLOCK_SIZE sb_type) {
+static INLINE const uint8_t *av1_get_contiguous_soft_mask(int wedge_index,
+ int wedge_sign,
+ BLOCK_SIZE sb_type) {
return wedge_params_lookup[sb_type].masks[wedge_sign][wedge_index];
}
-const uint8_t *vp10_get_soft_mask(int wedge_index, int wedge_sign,
- BLOCK_SIZE sb_type, int wedge_offset_x,
- int wedge_offset_y);
+const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
+ BLOCK_SIZE sb_type, int wedge_offset_x,
+ int wedge_offset_y);
-void vp10_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
- uint8_t *upred, uint8_t *vpred,
- int ystride, int ustride, int vstride,
- BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
- int ystride, BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
- int ustride, int plane,
- BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
- uint8_t *vpred, int ustride,
- int vstride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
+ uint8_t *upred, uint8_t *vpred,
+ int ystride, int ustride, int vstride,
+ BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+ int ystride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
+ int ustride, int plane,
+ BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+ uint8_t *vpred, int ustride,
+ int vstride, BLOCK_SIZE bsize);
-void vp10_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
- BLOCK_SIZE bsize, int plane,
- uint8_t *intra_pred,
- int intra_stride);
-void vp10_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
- const uint8_t *inter_pred, int inter_stride,
- const uint8_t *intra_pred, int intra_stride);
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
- uint8_t *vpred, int ustride,
- int vstride, BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
- int ystride, BLOCK_SIZE bsize);
+void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
+ BLOCK_SIZE bsize, int plane,
+ uint8_t *intra_pred,
+ int intra_stride);
+void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
+ const uint8_t *inter_pred, int inter_stride,
+ const uint8_t *intra_pred, int intra_stride);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+ uint8_t *vpred, int ustride,
+ int vstride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+ int ystride, BLOCK_SIZE bsize);
// Encoder only
-void vp10_build_inter_predictors_for_planes_single_buf(
+void av1_build_inter_predictors_for_planes_single_buf(
MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]);
-void vp10_build_wedge_inter_predictor_from_buf(
- MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
- uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
- int ext_dst_stride1[3]);
+void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ int plane_from, int plane_to,
+ uint8_t *ext_dst0[3],
+ int ext_dst_stride0[3],
+ uint8_t *ext_dst1[3],
+ int ext_dst_stride1[3]);
#endif // CONFIG_EXT_INTER
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_RECONINTER_H_
+#endif // AV1_COMMON_RECONINTER_H_
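The renamed av1_build_inter_predictor above takes its motion vector as mv_q3, i.e. with three fractional bits (1/8-pel units). A minimal standalone sketch, using an arbitrary example value, of how such a vector splits into the full-pel source offset and the sub-pel phase that selects the interpolation filter taps:

#include <stdio.h>

int main(void) {
  /* mv_q3: three fractional bits, i.e. 1/8-pel units (example value). */
  const int mv_row_q3 = 21;           /* 2 full pels + 5/8 pel: 2*8 + 5 */
  const int fullpel = mv_row_q3 >> 3; /* integer-pel part of the offset */
  const int subpel = mv_row_q3 & 7;   /* phase that picks the filter taps */
  printf("full-pel offset = %d, sub-pel phase = %d/8\n", fullpel, subpel);
  return 0;
}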
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index 801f61e..3c08ac4 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -10,17 +10,17 @@
#include <math.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/system_state.h"
-#if CONFIG_VP9_HIGHBITDEPTH
-#include "aom_dsp/vpx_dsp_common.h"
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#include "aom_mem/vpx_mem.h"
+#if CONFIG_AOM_HIGHBITDEPTH
+#include "aom_dsp/aom_dsp_common.h"
+#endif // CONFIG_AOM_HIGHBITDEPTH
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
-#include "aom_ports/vpx_once.h"
+#include "aom_ports/aom_once.h"
#if CONFIG_EXT_INTRA
#include "av1/common/intra_filters.h"
#endif
@@ -222,14 +222,14 @@
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_EXT_PARTITION_TYPES
-static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
- int right_available,
+static int av1_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int right_available,
#if CONFIG_EXT_PARTITION_TYPES
- PARTITION_TYPE partition,
+ PARTITION_TYPE partition,
#endif
- TX_SIZE txsz, int y, int x, int ss_x) {
+ TX_SIZE txsz, int y, int x, int ss_x) {
const int wl = mi_width_log2_lookup[bsize];
- const int w = VPXMAX(num_4x4_blocks_wide_lookup[bsize] >> ss_x, 1);
+ const int w = AOMMAX(num_4x4_blocks_wide_lookup[bsize] >> ss_x, 1);
const int step = 1 << txsz;
if (!right_available) {
@@ -270,9 +270,9 @@
}
}
-static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
- int bottom_available, TX_SIZE txsz, int y, int x,
- int ss_y) {
+static int av1_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int bottom_available, TX_SIZE txsz, int y, int x,
+ int ss_y) {
if (!bottom_available || x != 0) {
return 0;
} else {
@@ -309,22 +309,22 @@
static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
static intra_pred_fn dc_pred[2][2][TX_SIZES];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above, const uint16_t *left,
int bd);
static intra_high_pred_fn pred_high[INTRA_MODES][4];
static intra_high_pred_fn dc_pred_high[2][2][4];
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static void vp10_init_intra_predictors_internal(void) {
+static void av1_init_intra_predictors_internal(void) {
#define INIT_NO_4X4(p, type) \
- p[TX_8X8] = vpx_##type##_predictor_8x8; \
- p[TX_16X16] = vpx_##type##_predictor_16x16; \
- p[TX_32X32] = vpx_##type##_predictor_32x32
+ p[TX_8X8] = aom_##type##_predictor_8x8; \
+ p[TX_16X16] = aom_##type##_predictor_16x16; \
+ p[TX_32X32] = aom_##type##_predictor_32x32
#define INIT_ALL_SIZES(p, type) \
- p[TX_4X4] = vpx_##type##_predictor_4x4; \
+ p[TX_4X4] = aom_##type##_predictor_4x4; \
INIT_NO_4X4(p, type)
INIT_ALL_SIZES(pred[V_PRED], v);
@@ -342,7 +342,7 @@
INIT_ALL_SIZES(dc_pred[1][0], dc_left);
INIT_ALL_SIZES(dc_pred[1][1], dc);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207e);
@@ -357,7 +357,7 @@
INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#undef intra_pred_allsizes
}
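The INIT_NO_4X4 / INIT_ALL_SIZES macros above fill per-mode, per-transform-size function-pointer tables by token-pasting the kernel name. A self-contained sketch of the same pattern with stub predictors (the demo_* names are illustrative stand-ins, not the real aom_dsp kernels):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left);

/* Stub kernels standing in for the real aom_*_predictor_* functions:
   vertical prediction just repeats the row above. */
static void demo_v_predictor_4x4(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void)left;
  for (r = 0; r < 4; ++r)
    for (c = 0; c < 4; ++c) dst[r * stride + c] = above[c];
}
static void demo_v_predictor_8x8(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void)left;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) dst[r * stride + c] = above[c];
}

enum { DEMO_TX_4X4, DEMO_TX_8X8, DEMO_TX_SIZES };
static intra_pred_fn pred[DEMO_TX_SIZES];

/* Token pasting, as in INIT_ALL_SIZES: one macro use wires up every
   size variant of a mode's kernel. */
#define DEMO_INIT_SIZES(p, type)                \
  p[DEMO_TX_4X4] = demo_##type##_predictor_4x4; \
  p[DEMO_TX_8X8] = demo_##type##_predictor_8x8

int main(void) {
  uint8_t above[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[8 * 8] = { 0 };
  DEMO_INIT_SIZES(pred, v);
  pred[DEMO_TX_4X4](dst, 8, above, NULL);
  printf("dst[0..3] = %d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
  return 0;
}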
@@ -388,13 +388,13 @@
val = ROUND_POWER_OF_TWO(val, 8);
} else {
filter_idx = ROUND_POWER_OF_TWO(shift, 8 - SUBPEL_BITS);
- filter = vp10_intra_filter_kernels[filter_type][filter_idx];
+ filter = av1_intra_filter_kernels[filter_type][filter_idx];
if (filter_idx < (1 << SUBPEL_BITS)) {
val = 0;
for (k = 0; k < SUBPEL_TAPS; ++k) {
idx = base + 1 - (SUBPEL_TAPS / 2) + k;
- idx = VPXMAX(VPXMIN(idx, ref_end_idx), ref_start_idx);
+ idx = AOMMAX(AOMMIN(idx, ref_end_idx), ref_start_idx);
val += ref[idx] * filter[k];
}
val = ROUND_POWER_OF_TWO(val, FILTER_BITS);
@@ -439,7 +439,7 @@
base += 1;
shift = 0;
}
- len = VPXMIN(bs, 2 * bs - 1 - base);
+ len = AOMMIN(bs, 2 * bs - 1 - base);
if (len <= 0) {
int i;
for (i = r; i < bs; ++i) {
@@ -460,8 +460,8 @@
}
} else {
if (!flags[shift]) {
- const int16_t *filter = vp10_intra_filter_kernels[filter_type][shift];
- vpx_convolve8_horiz(src + pad_size, 2 * bs, buf[shift], 2 * bs,
+ const int16_t *filter = av1_intra_filter_kernels[filter_type][shift];
+ aom_convolve8_horiz(src + pad_size, 2 * bs, buf[shift], 2 * bs,
filter, 16, NULL, 16, 2 * bs,
2 * bs < 16 ? 2 : 1);
flags[shift] = 1;
@@ -570,7 +570,7 @@
base += 1;
shift = 0;
}
- len = VPXMIN(bs, 2 * bs - 1 - base);
+ len = AOMMIN(bs, 2 * bs - 1 - base);
if (len <= 0) {
for (r = 0; r < bs; ++r) {
@@ -590,8 +590,8 @@
}
} else {
if (!flags[shift]) {
- const int16_t *filter = vp10_intra_filter_kernels[filter_type][shift];
- vpx_convolve8_vert(src + 4 * pad_size, 4, buf[0] + 4 * shift,
+ const int16_t *filter = av1_intra_filter_kernels[filter_type][shift];
+ aom_convolve8_vert(src + 4 * pad_size, 4, buf[0] + 4 * shift,
4 * SUBPEL_SHIFTS, NULL, 16, filter, 16,
2 * bs < 16 ? 4 : 4, 2 * bs);
flags[shift] = 1;
@@ -730,53 +730,53 @@
}
}
-void vp10_dc_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_dc_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, DC_PRED);
}
-void vp10_v_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_v_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, V_PRED);
}
-void vp10_h_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_h_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, H_PRED);
}
-void vp10_d45_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_d45_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, D45_PRED);
}
-void vp10_d135_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_d135_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, D135_PRED);
}
-void vp10_d117_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_d117_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, D117_PRED);
}
-void vp10_d153_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_d153_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, D153_PRED);
}
-void vp10_d207_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_d207_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, D207_PRED);
}
-void vp10_d63_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_d63_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, D63_PRED);
}
-void vp10_tm_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_tm_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
filter_intra_predictors_4tap(dst, stride, bs, above, left, TM_PRED);
}
@@ -784,33 +784,33 @@
int bs, const uint8_t *above,
const uint8_t *left) {
switch (mode) {
- case DC_PRED: vp10_dc_filter_predictor(dst, stride, bs, above, left); break;
- case V_PRED: vp10_v_filter_predictor(dst, stride, bs, above, left); break;
- case H_PRED: vp10_h_filter_predictor(dst, stride, bs, above, left); break;
+ case DC_PRED: av1_dc_filter_predictor(dst, stride, bs, above, left); break;
+ case V_PRED: av1_v_filter_predictor(dst, stride, bs, above, left); break;
+ case H_PRED: av1_h_filter_predictor(dst, stride, bs, above, left); break;
case D45_PRED:
- vp10_d45_filter_predictor(dst, stride, bs, above, left);
+ av1_d45_filter_predictor(dst, stride, bs, above, left);
break;
case D135_PRED:
- vp10_d135_filter_predictor(dst, stride, bs, above, left);
+ av1_d135_filter_predictor(dst, stride, bs, above, left);
break;
case D117_PRED:
- vp10_d117_filter_predictor(dst, stride, bs, above, left);
+ av1_d117_filter_predictor(dst, stride, bs, above, left);
break;
case D153_PRED:
- vp10_d153_filter_predictor(dst, stride, bs, above, left);
+ av1_d153_filter_predictor(dst, stride, bs, above, left);
break;
case D207_PRED:
- vp10_d207_filter_predictor(dst, stride, bs, above, left);
+ av1_d207_filter_predictor(dst, stride, bs, above, left);
break;
case D63_PRED:
- vp10_d63_filter_predictor(dst, stride, bs, above, left);
+ av1_d63_filter_predictor(dst, stride, bs, above, left);
break;
- case TM_PRED: vp10_tm_filter_predictor(dst, stride, bs, above, left); break;
+ case TM_PRED: av1_tm_filter_predictor(dst, stride, bs, above, left); break;
default: assert(0);
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int highbd_intra_subpel_interp(int base, int shift, const uint16_t *ref,
int ref_start_idx, int ref_end_idx,
INTRA_FILTER filter_type) {
@@ -822,13 +822,13 @@
val = ROUND_POWER_OF_TWO(val, 8);
} else {
filter_idx = ROUND_POWER_OF_TWO(shift, 8 - SUBPEL_BITS);
- filter = vp10_intra_filter_kernels[filter_type][filter_idx];
+ filter = av1_intra_filter_kernels[filter_type][filter_idx];
if (filter_idx < (1 << SUBPEL_BITS)) {
val = 0;
for (k = 0; k < SUBPEL_TAPS; ++k) {
idx = base + 1 - (SUBPEL_TAPS / 2) + k;
- idx = VPXMAX(VPXMIN(idx, ref_end_idx), ref_start_idx);
+ idx = AOMMAX(AOMMIN(idx, ref_end_idx), ref_start_idx);
val += ref[idx] * filter[k];
}
val = ROUND_POWER_OF_TWO(val, FILTER_BITS);
@@ -956,7 +956,7 @@
(void)above;
(void)bd;
for (r = 0; r < bs; r++) {
- vpx_memset16(dst, left[r], bs);
+ aom_memset16(dst, left[r], bs);
dst += stride;
}
}
@@ -1025,70 +1025,70 @@
}
}
-void vp10_highbd_dc_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_dc_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, DC_PRED,
bd);
}
-void vp10_highbd_v_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_v_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, V_PRED, bd);
}
-void vp10_highbd_h_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_h_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, H_PRED, bd);
}
-void vp10_highbd_d45_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d45_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D45_PRED,
bd);
}
-void vp10_highbd_d135_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d135_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D135_PRED,
bd);
}
-void vp10_highbd_d117_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d117_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D117_PRED,
bd);
}
-void vp10_highbd_d153_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d153_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D153_PRED,
bd);
}
-void vp10_highbd_d207_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d207_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D207_PRED,
bd);
}
-void vp10_highbd_d63_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d63_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D63_PRED,
bd);
}
-void vp10_highbd_tm_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_tm_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, TM_PRED,
bd);
}
@@ -1099,42 +1099,42 @@
const uint16_t *left, int bd) {
switch (mode) {
case DC_PRED:
- vp10_highbd_dc_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_dc_filter_predictor(dst, stride, bs, above, left, bd);
break;
case V_PRED:
- vp10_highbd_v_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_v_filter_predictor(dst, stride, bs, above, left, bd);
break;
case H_PRED:
- vp10_highbd_h_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_h_filter_predictor(dst, stride, bs, above, left, bd);
break;
case D45_PRED:
- vp10_highbd_d45_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_d45_filter_predictor(dst, stride, bs, above, left, bd);
break;
case D135_PRED:
- vp10_highbd_d135_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_d135_filter_predictor(dst, stride, bs, above, left, bd);
break;
case D117_PRED:
- vp10_highbd_d117_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_d117_filter_predictor(dst, stride, bs, above, left, bd);
break;
case D153_PRED:
- vp10_highbd_d153_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_d153_filter_predictor(dst, stride, bs, above, left, bd);
break;
case D207_PRED:
- vp10_highbd_d207_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_d207_filter_predictor(dst, stride, bs, above, left, bd);
break;
case D63_PRED:
- vp10_highbd_d63_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_d63_filter_predictor(dst, stride, bs, above, left, bd);
break;
case TM_PRED:
- vp10_highbd_tm_filter_predictor(dst, stride, bs, above, left, bd);
+ av1_highbd_tm_filter_predictor(dst, stride, bs, above, left, bd);
break;
default: assert(0);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EXT_INTRA
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void build_intra_predictors_high(
const MACROBLOCKD *xd, const uint8_t *ref8, int ref_stride, uint8_t *dst8,
int dst_stride, PREDICTION_MODE mode, TX_SIZE tx_size, int n_top_px,
@@ -1194,7 +1194,7 @@
int i;
const int val = (n_left_px == 0) ? base + 1 : base - 1;
for (i = 0; i < bs; ++i) {
- vpx_memset16(dst, val, bs);
+ aom_memset16(dst, val, bs);
dst += dst_stride;
}
return;
@@ -1224,9 +1224,9 @@
left_col[i] = ref[i * ref_stride - 1];
}
if (i < (bs << need_bottom))
- vpx_memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i);
+ aom_memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i);
} else {
- vpx_memset16(left_col, base + 1, bs << need_bottom);
+ aom_memset16(left_col, base + 1, bs << need_bottom);
}
}
@@ -1254,9 +1254,9 @@
i += n_topright_px;
}
if (i < (bs << need_right))
- vpx_memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i);
+ aom_memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i);
} else {
- vpx_memset16(above_row, base - 1, bs << need_right);
+ aom_memset16(above_row, base - 1, bs << need_right);
}
}
@@ -1285,7 +1285,7 @@
if (mode != DC_PRED && mode != TM_PRED &&
xd->mi[0]->mbmi.sb_type >= BLOCK_8X8) {
INTRA_FILTER filter = INTRA_FILTER_LINEAR;
- if (plane == 0 && vp10_is_intra_filter_switchable(p_angle))
+ if (plane == 0 && av1_is_intra_filter_switchable(p_angle))
filter = xd->mi[0]->mbmi.intra_filter;
highbd_dr_predictor(dst, dst_stride, bs, const_above_row, left_col, p_angle,
xd->bd, filter);
@@ -1302,7 +1302,7 @@
xd->bd);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
int ref_stride, uint8_t *dst, int dst_stride,
@@ -1453,7 +1453,7 @@
if (mode != DC_PRED && mode != TM_PRED &&
xd->mi[0]->mbmi.sb_type >= BLOCK_8X8) {
INTRA_FILTER filter = INTRA_FILTER_LINEAR;
- if (plane == 0 && vp10_is_intra_filter_switchable(p_angle))
+ if (plane == 0 && av1_is_intra_filter_switchable(p_angle))
filter = xd->mi[0]->mbmi.intra_filter;
dr_predictor(dst, dst_stride, tx_size, const_above_row, left_col, p_angle,
filter);
@@ -1470,11 +1470,11 @@
}
}
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
- TX_SIZE tx_size, PREDICTION_MODE mode,
- const uint8_t *ref, int ref_stride, uint8_t *dst,
- int dst_stride, int col_off, int row_off,
- int plane) {
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+ TX_SIZE tx_size, PREDICTION_MODE mode,
+ const uint8_t *ref, int ref_stride, uint8_t *dst,
+ int dst_stride, int col_off, int row_off,
+ int plane) {
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
const struct macroblockd_plane *const pd = &xd->plane[plane];
const int txw = num_4x4_blocks_wide_txsize_lookup[tx_size];
@@ -1483,8 +1483,8 @@
const int have_left = col_off || xd->left_available;
const int x = col_off * 4;
const int y = row_off * 4;
- const int bw = pd->subsampling_x ? 1 << bwl_in : VPXMAX(2, 1 << bwl_in);
- const int bh = pd->subsampling_y ? 1 << bhl_in : VPXMAX(2, 1 << bhl_in);
+ const int bw = pd->subsampling_x ? 1 << bwl_in : AOMMAX(2, 1 << bwl_in);
+ const int bh = pd->subsampling_y ? 1 << bhl_in : AOMMAX(2, 1 << bhl_in);
const int mi_row = -xd->mb_to_top_edge >> (3 + MI_SIZE_LOG2);
const int mi_col = -xd->mb_to_left_edge >> (3 + MI_SIZE_LOG2);
const int wpx = 4 * bw;
@@ -1506,31 +1506,30 @@
const PARTITION_TYPE partition = xd->mi[0]->mbmi.partition;
#endif
const int have_right =
- vp10_has_right(bsize, mi_row, mi_col, right_available,
+ av1_has_right(bsize, mi_row, mi_col, right_available,
#if CONFIG_EXT_PARTITION_TYPES
- partition,
+ partition,
#endif
- tx_size, row_off, col_off, pd->subsampling_x);
- const int have_bottom =
- vp10_has_bottom(bsize, mi_row, mi_col, yd > 0, tx_size, row_off, col_off,
- pd->subsampling_y);
+ tx_size, row_off, col_off, pd->subsampling_x);
+ const int have_bottom = av1_has_bottom(bsize, mi_row, mi_col, yd > 0, tx_size,
+ row_off, col_off, pd->subsampling_y);
if (xd->mi[0]->mbmi.palette_mode_info.palette_size[plane != 0] > 0) {
const int bs = 4 * num_4x4_blocks_wide_txsize_lookup[tx_size];
const int stride = 4 * (1 << bwl_in);
int r, c;
uint8_t *map = NULL;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t *palette = xd->mi[0]->mbmi.palette_mode_info.palette_colors +
plane * PALETTE_MAX_SIZE;
#else
uint8_t *palette = xd->mi[0]->mbmi.palette_mode_info.palette_colors +
plane * PALETTE_MAX_SIZE;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
map = xd->plane[plane != 0].color_index_map;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
for (r = 0; r < bs; ++r)
@@ -1546,29 +1545,29 @@
for (r = 0; r < bs; ++r)
for (c = 0; c < bs; ++c)
dst[r * dst_stride + c] = palette[map[(r + y) * stride + c + x]];
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
build_intra_predictors_high(
xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
- have_top ? VPXMIN(txwpx, xr + txwpx) : 0,
- have_top && have_right ? VPXMIN(txwpx, xr) : 0,
- have_left ? VPXMIN(txhpx, yd + txhpx) : 0,
- have_bottom && have_left ? VPXMIN(txhpx, yd) : 0, plane);
+ have_top ? AOMMIN(txwpx, xr + txwpx) : 0,
+ have_top && have_right ? AOMMIN(txwpx, xr) : 0,
+ have_left ? AOMMIN(txhpx, yd + txhpx) : 0,
+ have_bottom && have_left ? AOMMIN(txhpx, yd) : 0, plane);
return;
}
#endif
build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
- have_top ? VPXMIN(txwpx, xr + txwpx) : 0,
- have_top && have_right ? VPXMIN(txwpx, xr) : 0,
- have_left ? VPXMIN(txhpx, yd + txhpx) : 0,
- have_bottom && have_left ? VPXMIN(txhpx, yd) : 0,
+ have_top ? AOMMIN(txwpx, xr + txwpx) : 0,
+ have_top && have_right ? AOMMIN(txwpx, xr) : 0,
+ have_left ? AOMMIN(txhpx, yd + txhpx) : 0,
+ have_bottom && have_left ? AOMMIN(txhpx, yd) : 0,
plane);
}
-void vp10_init_intra_predictors(void) {
- once(vp10_init_intra_predictors_internal);
+void av1_init_intra_predictors(void) {
+ once(av1_init_intra_predictors_internal);
}
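The palette branch of av1_predict_intra_block above is a per-pixel table lookup: color_index_map carries one palette index per sample, and the predictor copies the mapped color into the destination. A toy standalone version of that inner loop (4x4 block and made-up palette values; the real code also has the CONVERT_TO_SHORTPTR high-bit-depth variant):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  enum { BS = 4 };
  /* Stand-ins for palette_colors and color_index_map. */
  const uint8_t palette[3] = { 32, 128, 220 };
  const uint8_t map[BS * BS] = { 0, 0, 1, 1, 0, 0, 1, 1,
                                 2, 2, 1, 1, 2, 2, 0, 0 };
  uint8_t dst[BS * BS];
  int r, c;
  /* Same shape as: dst[r * dst_stride + c] =
     palette[map[(r + y) * stride + c + x]]; with x = y = 0 here. */
  for (r = 0; r < BS; ++r)
    for (c = 0; c < BS; ++c) dst[r * BS + c] = palette[map[r * BS + c]];
  for (r = 0; r < BS; ++r) {
    for (c = 0; c < BS; ++c) printf("%4d", dst[r * BS + c]);
    printf("\n");
  }
  return 0;
}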
diff --git a/av1/common/reconintra.h b/av1/common/reconintra.h
index d20b5a4..3adde50 100644
--- a/av1/common/reconintra.h
+++ b/av1/common/reconintra.h
@@ -8,27 +8,27 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_RECONINTRA_H_
-#define VP10_COMMON_RECONINTRA_H_
+#ifndef AV1_COMMON_RECONINTRA_H_
+#define AV1_COMMON_RECONINTRA_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "av1/common/blockd.h"
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_init_intra_predictors(void);
+void av1_init_intra_predictors(void);
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
- TX_SIZE tx_size, PREDICTION_MODE mode,
- const uint8_t *ref, int ref_stride, uint8_t *dst,
- int dst_stride, int aoff, int loff, int plane);
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+ TX_SIZE tx_size, PREDICTION_MODE mode,
+ const uint8_t *ref, int ref_stride, uint8_t *dst,
+ int dst_stride, int aoff, int loff, int plane);
#if CONFIG_EXT_INTRA
-int vp10_is_intra_filter_switchable(int angle);
+int av1_is_intra_filter_switchable(int angle);
#endif // CONFIG_EXT_INTRA
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_RECONINTRA_H_
+#endif // AV1_COMMON_RECONINTRA_H_
diff --git a/av1/common/restoration.c b/av1/common/restoration.c
index 4d4c9fc..fad5dd6 100644
--- a/av1/common/restoration.c
+++ b/av1/common/restoration.c
@@ -10,12 +10,12 @@
#include <math.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/restoration.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#define BILATERAL_PARAM_PRECISION 16
@@ -55,15 +55,15 @@
typedef void (*restore_func_type)(uint8_t *data8, int width, int height,
int stride, RestorationInternal *rst,
uint8_t *tmpdata8, int tmpstride);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*restore_func_highbd_type)(uint8_t *data8, int width, int height,
int stride, RestorationInternal *rst,
uint8_t *tmpdata8, int tmpstride,
int bit_depth);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static INLINE BilateralParamsType vp10_bilateral_level_to_params(int index,
- int kf) {
+static INLINE BilateralParamsType av1_bilateral_level_to_params(int index,
+ int kf) {
return kf ? bilateral_level_to_params_arr_kf[index]
: bilateral_level_to_params_arr[index];
}
@@ -77,31 +77,31 @@
{ 64, 64 }, { 128, 128 }, { 256, 256 }
};
-void vp10_get_restoration_tile_size(int tilesize, int width, int height,
- int *tile_width, int *tile_height,
- int *nhtiles, int *nvtiles) {
+void av1_get_restoration_tile_size(int tilesize, int width, int height,
+ int *tile_width, int *tile_height,
+ int *nhtiles, int *nvtiles) {
*tile_width = (tilesize < 0)
? width
- : VPXMIN(restoration_tile_sizes[tilesize].width, width);
+ : AOMMIN(restoration_tile_sizes[tilesize].width, width);
*tile_height = (tilesize < 0)
? height
- : VPXMIN(restoration_tile_sizes[tilesize].height, height);
+ : AOMMIN(restoration_tile_sizes[tilesize].height, height);
*nhtiles = (width + (*tile_width >> 1)) / *tile_width;
*nvtiles = (height + (*tile_height >> 1)) / *tile_height;
}
-int vp10_get_restoration_ntiles(int tilesize, int width, int height) {
+int av1_get_restoration_ntiles(int tilesize, int width, int height) {
int nhtiles, nvtiles;
int tile_width, tile_height;
- vp10_get_restoration_tile_size(tilesize, width, height, &tile_width,
- &tile_height, &nhtiles, &nvtiles);
+ av1_get_restoration_tile_size(tilesize, width, height, &tile_width,
+ &tile_height, &nhtiles, &nvtiles);
return (nhtiles * nvtiles);
}
-void vp10_loop_restoration_precal() {
+void av1_loop_restoration_precal() {
int i;
for (i = 0; i < BILATERAL_LEVELS_KF; i++) {
- const BilateralParamsType param = vp10_bilateral_level_to_params(i, 1);
+ const BilateralParamsType param = av1_bilateral_level_to_params(i, 1);
const int sigma_x = param.sigma_x;
const int sigma_y = param.sigma_y;
const int sigma_r = param.sigma_r;
@@ -129,7 +129,7 @@
}
}
for (i = 0; i < BILATERAL_LEVELS; i++) {
- const BilateralParamsType param = vp10_bilateral_level_to_params(i, 0);
+ const BilateralParamsType param = av1_bilateral_level_to_params(i, 0);
const int sigma_x = param.sigma_x;
const int sigma_y = param.sigma_y;
const int sigma_r = param.sigma_r;
@@ -159,13 +159,13 @@
}
}
-int vp10_bilateral_level_bits(const VP10_COMMON *const cm) {
+int av1_bilateral_level_bits(const AV1_COMMON *const cm) {
return cm->frame_type == KEY_FRAME ? BILATERAL_LEVEL_BITS_KF
: BILATERAL_LEVEL_BITS;
}
-void vp10_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
- int kf, int width, int height) {
+void av1_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
+ int kf, int width, int height) {
int i, tile_idx;
rst->restoration_type = rsi->restoration_type;
rst->subsampling_x = 0;
@@ -173,10 +173,10 @@
if (rsi->restoration_type == RESTORE_BILATERAL) {
rst->tilesize_index = BILATERAL_TILESIZE;
rst->ntiles =
- vp10_get_restoration_ntiles(rst->tilesize_index, width, height);
- vp10_get_restoration_tile_size(rst->tilesize_index, width, height,
- &rst->tile_width, &rst->tile_height,
- &rst->nhtiles, &rst->nvtiles);
+ av1_get_restoration_ntiles(rst->tilesize_index, width, height);
+ av1_get_restoration_tile_size(rst->tilesize_index, width, height,
+ &rst->tile_width, &rst->tile_height,
+ &rst->nhtiles, &rst->nvtiles);
rst->bilateral_level = rsi->bilateral_level;
rst->wr_lut = (uint8_t **)malloc(sizeof(*rst->wr_lut) * rst->ntiles);
assert(rst->wr_lut != NULL);
@@ -195,10 +195,10 @@
} else if (rsi->restoration_type == RESTORE_WIENER) {
rst->tilesize_index = WIENER_TILESIZE;
rst->ntiles =
- vp10_get_restoration_ntiles(rst->tilesize_index, width, height);
- vp10_get_restoration_tile_size(rst->tilesize_index, width, height,
- &rst->tile_width, &rst->tile_height,
- &rst->nhtiles, &rst->nvtiles);
+ av1_get_restoration_ntiles(rst->tilesize_index, width, height);
+ av1_get_restoration_tile_size(rst->tilesize_index, width, height,
+ &rst->tile_width, &rst->tile_height,
+ &rst->nhtiles, &rst->nvtiles);
rst->wiener_level = rsi->wiener_level;
rst->vfilter =
(int(*)[RESTORATION_WIN])malloc(sizeof(*rst->vfilter) * rst->ntiles);
@@ -373,7 +373,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void loop_bilateral_filter_highbd(uint8_t *data8, int width, int height,
int stride, RestorationInternal *rst,
uint8_t *tmpdata8, int tmpstride,
@@ -530,10 +530,10 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
- int start_mi_row, int end_mi_row, int y_only) {
+void av1_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+ int start_mi_row, int end_mi_row, int y_only) {
const int ywidth = frame->y_crop_width;
const int ystride = frame->y_stride;
const int uvwidth = frame->uv_crop_width;
@@ -546,35 +546,35 @@
cm->rst_internal.restoration_type == RESTORE_BILATERAL
? loop_bilateral_filter
: loop_wiener_filter;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
restore_func_highbd_type restore_func_highbd =
cm->rst_internal.restoration_type == RESTORE_BILATERAL
? loop_bilateral_filter_highbd
: loop_wiener_filter_highbd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
YV12_BUFFER_CONFIG tmp_buf;
memset(&tmp_buf, 0, sizeof(YV12_BUFFER_CONFIG));
- yend = VPXMIN(yend, cm->height);
- uvend = VPXMIN(uvend, cm->subsampling_y ? (cm->height + 1) >> 1 : cm->height);
+ yend = AOMMIN(yend, cm->height);
+ uvend = AOMMIN(uvend, cm->subsampling_y ? (cm->height + 1) >> 1 : cm->height);
- if (vpx_realloc_frame_buffer(
+ if (aom_realloc_frame_buffer(
&tmp_buf, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL) < 0)
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL) < 0)
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate tmp restoration buffer");
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth)
restore_func_highbd(frame->y_buffer + ystart * ystride, ywidth,
yend - ystart, ystride, &cm->rst_internal,
tmp_buf.y_buffer + ystart * tmp_buf.y_stride,
tmp_buf.y_stride, cm->bit_depth);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
restore_func(frame->y_buffer + ystart * ystride, ywidth, yend - ystart,
ystride, &cm->rst_internal,
tmp_buf.y_buffer + ystart * tmp_buf.y_stride,
@@ -582,7 +582,7 @@
if (!y_only) {
cm->rst_internal.subsampling_x = cm->subsampling_x;
cm->rst_internal.subsampling_y = cm->subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
restore_func_highbd(frame->u_buffer + uvstart * uvstride, uvwidth,
uvend - uvstart, uvstride, &cm->rst_internal,
@@ -593,7 +593,7 @@
tmp_buf.v_buffer + uvstart * tmp_buf.uv_stride,
tmp_buf.uv_stride, cm->bit_depth);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
restore_func(frame->u_buffer + uvstart * uvstride, uvwidth,
uvend - uvstart, uvstride, &cm->rst_internal,
tmp_buf.u_buffer + uvstart * tmp_buf.uv_stride,
@@ -602,11 +602,11 @@
uvend - uvstart, uvstride, &cm->rst_internal,
tmp_buf.v_buffer + uvstart * tmp_buf.uv_stride,
tmp_buf.uv_stride);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
- vpx_free_frame_buffer(&tmp_buf);
+ aom_free_frame_buffer(&tmp_buf);
if (cm->rst_internal.restoration_type == RESTORE_BILATERAL) {
free(cm->rst_internal.wr_lut);
cm->rst_internal.wr_lut = NULL;
@@ -621,9 +621,9 @@
}
}
-void vp10_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
- RestorationInfo *rsi, int y_only,
- int partial_frame) {
+void av1_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+ RestorationInfo *rsi, int y_only,
+ int partial_frame) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
if (rsi->restoration_type != RESTORE_NONE) {
start_mi_row = 0;
@@ -631,12 +631,12 @@
if (partial_frame && cm->mi_rows > 8) {
start_mi_row = cm->mi_rows >> 1;
start_mi_row &= 0xfffffff8;
- mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+ mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
}
end_mi_row = start_mi_row + mi_rows_to_filter;
- vp10_loop_restoration_init(&cm->rst_internal, rsi,
- cm->frame_type == KEY_FRAME, cm->width,
- cm->height);
- vp10_loop_restoration_rows(frame, cm, start_mi_row, end_mi_row, y_only);
+ av1_loop_restoration_init(&cm->rst_internal, rsi,
+ cm->frame_type == KEY_FRAME, cm->width,
+ cm->height);
+ av1_loop_restoration_rows(frame, cm, start_mi_row, end_mi_row, y_only);
}
}
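The tile bookkeeping in av1_get_restoration_tile_size / av1_get_restoration_ntiles above rounds to the nearest whole tile: the + (*tile_width >> 1) term means a trailing partial tile counts only if it is at least half a tile wide. A standalone check of that arithmetic, assuming 256x256 tiles (one of the restoration_tile_sizes entries) on a 1280x720 frame:

#include <stdio.h>

#define AOMMIN(a, b) ((a) < (b) ? (a) : (b))

static void get_restoration_tile_size(int tile_w_max, int tile_h_max,
                                      int width, int height, int *tile_width,
                                      int *tile_height, int *nhtiles,
                                      int *nvtiles) {
  /* Same formulas as av1_get_restoration_tile_size (tilesize >= 0 case). */
  *tile_width = AOMMIN(tile_w_max, width);
  *tile_height = AOMMIN(tile_h_max, height);
  *nhtiles = (width + (*tile_width >> 1)) / *tile_width;
  *nvtiles = (height + (*tile_height >> 1)) / *tile_height;
}

int main(void) {
  int tw, th, nh, nv;
  get_restoration_tile_size(256, 256, 1280, 720, &tw, &th, &nh, &nv);
  /* 1280 / 256 = 5 exactly; 720 / 256 leaves a 208-pixel remainder,
     which is >= 128 and so rounds up to 3 vertical tiles: 15 total. */
  printf("%dx%d tiles of %dx%d -> %d total\n", nh, nv, tw, th, nh * nv);
  return 0;
}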
diff --git a/av1/common/restoration.h b/av1/common/restoration.h
index c1e937a..6c53a77 100644
--- a/av1/common/restoration.h
+++ b/av1/common/restoration.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_RESTORATION_H_
-#define VP10_COMMON_RESTORATION_H_
+#ifndef AV1_COMMON_RESTORATION_H_
+#define AV1_COMMON_RESTORATION_H_
#include "aom_ports/mem.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/blockd.h"
@@ -88,22 +88,21 @@
int (*vfilter)[RESTORATION_WIN], (*hfilter)[RESTORATION_WIN];
} RestorationInternal;
-int vp10_bilateral_level_bits(const struct VP10Common *const cm);
-int vp10_get_restoration_ntiles(int tilesize, int width, int height);
-void vp10_get_restoration_tile_size(int tilesize, int width, int height,
- int *tile_width, int *tile_height,
- int *nhtiles, int *nvtiles);
-void vp10_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
- int kf, int width, int height);
-void vp10_loop_restoration_frame(YV12_BUFFER_CONFIG *frame,
- struct VP10Common *cm, RestorationInfo *rsi,
- int y_only, int partial_frame);
-void vp10_loop_restoration_rows(YV12_BUFFER_CONFIG *frame,
- struct VP10Common *cm, int start_mi_row,
- int end_mi_row, int y_only);
-void vp10_loop_restoration_precal();
+int av1_bilateral_level_bits(const struct AV1Common *const cm);
+int av1_get_restoration_ntiles(int tilesize, int width, int height);
+void av1_get_restoration_tile_size(int tilesize, int width, int height,
+ int *tile_width, int *tile_height,
+ int *nhtiles, int *nvtiles);
+void av1_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
+ int kf, int width, int height);
+void av1_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+ RestorationInfo *rsi, int y_only,
+ int partial_frame);
+void av1_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+ int start_mi_row, int end_mi_row, int y_only);
+void av1_loop_restoration_precal();
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_RESTORATION_H_
+#endif // AV1_COMMON_RESTORATION_H_
diff --git a/av1/common/scale.c b/av1/common/scale.c
index 6bd3b74..908a2db 100644
--- a/av1/common/scale.c
+++ b/av1/common/scale.c
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/filter.h"
#include "av1/common/scale.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
static INLINE int scaled_x(int val, const struct scale_factors *sf) {
return (int)((int64_t)val * sf->x_scale_fp >> REF_SCALE_SHIFT);
@@ -34,7 +34,7 @@
return (other_size << REF_SCALE_SHIFT) / this_size;
}
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
@@ -42,13 +42,13 @@
return res;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
- int other_h, int this_w, int this_h,
- int use_highbd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+ int other_h, int this_w, int this_h,
+ int use_highbd) {
#else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
- int other_h, int this_w, int this_h) {
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+ int other_h, int this_w, int this_h) {
#endif
if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
sf->x_scale_fp = REF_INVALID_SCALE;
@@ -61,7 +61,7 @@
sf->x_step_q4 = scaled_x(16, sf);
sf->y_step_q4 = scaled_y(16, sf);
- if (vp10_is_scaled(sf)) {
+ if (av1_is_scaled(sf)) {
sf->scale_value_x = scaled_x;
sf->scale_value_y = scaled_y;
} else {
@@ -76,108 +76,108 @@
// best quality, but it may be worth trying an additional mode that does
// do the filtering on full-pel.
#if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
- sf->predict_ni[0][0][0] = vpx_convolve8_c;
- sf->predict_ni[0][0][1] = vpx_convolve8_avg_c;
- sf->predict_ni[0][1][0] = vpx_convolve8_c;
- sf->predict_ni[0][1][1] = vpx_convolve8_avg_c;
- sf->predict_ni[1][0][0] = vpx_convolve8_c;
- sf->predict_ni[1][0][1] = vpx_convolve8_avg_c;
- sf->predict_ni[1][1][0] = vpx_convolve8;
- sf->predict_ni[1][1][1] = vpx_convolve8_avg;
+ sf->predict_ni[0][0][0] = aom_convolve8_c;
+ sf->predict_ni[0][0][1] = aom_convolve8_avg_c;
+ sf->predict_ni[0][1][0] = aom_convolve8_c;
+ sf->predict_ni[0][1][1] = aom_convolve8_avg_c;
+ sf->predict_ni[1][0][0] = aom_convolve8_c;
+ sf->predict_ni[1][0][1] = aom_convolve8_avg_c;
+ sf->predict_ni[1][1][0] = aom_convolve8;
+ sf->predict_ni[1][1][1] = aom_convolve8_avg;
#endif // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
if (sf->x_step_q4 == 16) {
if (sf->y_step_q4 == 16) {
// No scaling in either direction.
- sf->predict[0][0][0] = vpx_convolve_copy;
- sf->predict[0][0][1] = vpx_convolve_avg;
- sf->predict[0][1][0] = vpx_convolve8_vert;
- sf->predict[0][1][1] = vpx_convolve8_avg_vert;
- sf->predict[1][0][0] = vpx_convolve8_horiz;
- sf->predict[1][0][1] = vpx_convolve8_avg_horiz;
+ sf->predict[0][0][0] = aom_convolve_copy;
+ sf->predict[0][0][1] = aom_convolve_avg;
+ sf->predict[0][1][0] = aom_convolve8_vert;
+ sf->predict[0][1][1] = aom_convolve8_avg_vert;
+ sf->predict[1][0][0] = aom_convolve8_horiz;
+ sf->predict[1][0][1] = aom_convolve8_avg_horiz;
} else {
// No scaling in x direction. Must always scale in the y direction.
- sf->predict[0][0][0] = vpx_convolve8_vert;
- sf->predict[0][0][1] = vpx_convolve8_avg_vert;
- sf->predict[0][1][0] = vpx_convolve8_vert;
- sf->predict[0][1][1] = vpx_convolve8_avg_vert;
- sf->predict[1][0][0] = vpx_convolve8;
- sf->predict[1][0][1] = vpx_convolve8_avg;
+ sf->predict[0][0][0] = aom_convolve8_vert;
+ sf->predict[0][0][1] = aom_convolve8_avg_vert;
+ sf->predict[0][1][0] = aom_convolve8_vert;
+ sf->predict[0][1][1] = aom_convolve8_avg_vert;
+ sf->predict[1][0][0] = aom_convolve8;
+ sf->predict[1][0][1] = aom_convolve8_avg;
}
} else {
if (sf->y_step_q4 == 16) {
// No scaling in the y direction. Must always scale in the x direction.
- sf->predict[0][0][0] = vpx_convolve8_horiz;
- sf->predict[0][0][1] = vpx_convolve8_avg_horiz;
- sf->predict[0][1][0] = vpx_convolve8;
- sf->predict[0][1][1] = vpx_convolve8_avg;
- sf->predict[1][0][0] = vpx_convolve8_horiz;
- sf->predict[1][0][1] = vpx_convolve8_avg_horiz;
+ sf->predict[0][0][0] = aom_convolve8_horiz;
+ sf->predict[0][0][1] = aom_convolve8_avg_horiz;
+ sf->predict[0][1][0] = aom_convolve8;
+ sf->predict[0][1][1] = aom_convolve8_avg;
+ sf->predict[1][0][0] = aom_convolve8_horiz;
+ sf->predict[1][0][1] = aom_convolve8_avg_horiz;
} else {
// Must always scale in both directions.
- sf->predict[0][0][0] = vpx_convolve8;
- sf->predict[0][0][1] = vpx_convolve8_avg;
- sf->predict[0][1][0] = vpx_convolve8;
- sf->predict[0][1][1] = vpx_convolve8_avg;
- sf->predict[1][0][0] = vpx_convolve8;
- sf->predict[1][0][1] = vpx_convolve8_avg;
+ sf->predict[0][0][0] = aom_convolve8;
+ sf->predict[0][0][1] = aom_convolve8_avg;
+ sf->predict[0][1][0] = aom_convolve8;
+ sf->predict[0][1][1] = aom_convolve8_avg;
+ sf->predict[1][0][0] = aom_convolve8;
+ sf->predict[1][0][1] = aom_convolve8_avg;
}
}
// 2D subpel motion always gets filtered in both directions
- sf->predict[1][1][0] = vpx_convolve8;
- sf->predict[1][1][1] = vpx_convolve8_avg;
+ sf->predict[1][1][0] = aom_convolve8;
+ sf->predict[1][1][1] = aom_convolve8_avg;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (use_highbd) {
#if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
- sf->highbd_predict_ni[0][0][0] = vpx_highbd_convolve8_c;
- sf->highbd_predict_ni[0][0][1] = vpx_highbd_convolve8_avg_c;
- sf->highbd_predict_ni[0][1][0] = vpx_highbd_convolve8_c;
- sf->highbd_predict_ni[0][1][1] = vpx_highbd_convolve8_avg_c;
- sf->highbd_predict_ni[1][0][0] = vpx_highbd_convolve8_c;
- sf->highbd_predict_ni[1][0][1] = vpx_highbd_convolve8_avg_c;
- sf->highbd_predict_ni[1][1][0] = vpx_highbd_convolve8;
- sf->highbd_predict_ni[1][1][1] = vpx_highbd_convolve8_avg;
+ sf->highbd_predict_ni[0][0][0] = aom_highbd_convolve8_c;
+ sf->highbd_predict_ni[0][0][1] = aom_highbd_convolve8_avg_c;
+ sf->highbd_predict_ni[0][1][0] = aom_highbd_convolve8_c;
+ sf->highbd_predict_ni[0][1][1] = aom_highbd_convolve8_avg_c;
+ sf->highbd_predict_ni[1][0][0] = aom_highbd_convolve8_c;
+ sf->highbd_predict_ni[1][0][1] = aom_highbd_convolve8_avg_c;
+ sf->highbd_predict_ni[1][1][0] = aom_highbd_convolve8;
+ sf->highbd_predict_ni[1][1][1] = aom_highbd_convolve8_avg;
#endif // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
if (sf->x_step_q4 == 16) {
if (sf->y_step_q4 == 16) {
// No scaling in either direction.
- sf->highbd_predict[0][0][0] = vpx_highbd_convolve_copy;
- sf->highbd_predict[0][0][1] = vpx_highbd_convolve_avg;
- sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert;
- sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert;
- sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz;
- sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz;
+ sf->highbd_predict[0][0][0] = aom_highbd_convolve_copy;
+ sf->highbd_predict[0][0][1] = aom_highbd_convolve_avg;
+ sf->highbd_predict[0][1][0] = aom_highbd_convolve8_vert;
+ sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg_vert;
+ sf->highbd_predict[1][0][0] = aom_highbd_convolve8_horiz;
+ sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg_horiz;
} else {
// No scaling in x direction. Must always scale in the y direction.
- sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_vert;
- sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_vert;
- sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert;
- sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert;
- sf->highbd_predict[1][0][0] = vpx_highbd_convolve8;
- sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg;
+ sf->highbd_predict[0][0][0] = aom_highbd_convolve8_vert;
+ sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_vert;
+ sf->highbd_predict[0][1][0] = aom_highbd_convolve8_vert;
+ sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg_vert;
+ sf->highbd_predict[1][0][0] = aom_highbd_convolve8;
+ sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
}
} else {
if (sf->y_step_q4 == 16) {
// No scaling in the y direction. Must always scale in the x direction.
- sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_horiz;
- sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_horiz;
- sf->highbd_predict[0][1][0] = vpx_highbd_convolve8;
- sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg;
- sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz;
- sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz;
+ sf->highbd_predict[0][0][0] = aom_highbd_convolve8_horiz;
+ sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_horiz;
+ sf->highbd_predict[0][1][0] = aom_highbd_convolve8;
+ sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg;
+ sf->highbd_predict[1][0][0] = aom_highbd_convolve8_horiz;
+ sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg_horiz;
} else {
// Must always scale in both directions.
- sf->highbd_predict[0][0][0] = vpx_highbd_convolve8;
- sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg;
- sf->highbd_predict[0][1][0] = vpx_highbd_convolve8;
- sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg;
- sf->highbd_predict[1][0][0] = vpx_highbd_convolve8;
- sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg;
+ sf->highbd_predict[0][0][0] = aom_highbd_convolve8;
+ sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg;
+ sf->highbd_predict[0][1][0] = aom_highbd_convolve8;
+ sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg;
+ sf->highbd_predict[1][0][0] = aom_highbd_convolve8;
+ sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
}
}
// 2D subpel motion always gets filtered in both directions.
- sf->highbd_predict[1][1][0] = vpx_highbd_convolve8;
- sf->highbd_predict[1][1][1] = vpx_highbd_convolve8_avg;
+ sf->highbd_predict[1][1][0] = aom_highbd_convolve8;
+ sf->highbd_predict[1][1][1] = aom_highbd_convolve8_avg;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
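The scale factors set up above are plain fixed-point ratios: get_fixed_point_scale_factor returns (other_size << REF_SCALE_SHIFT) / this_size, and scaled_x/scaled_y multiply back and shift down. A worked standalone example, assuming REF_SCALE_SHIFT is 14 as elsewhere in this tree (a 1920-wide reference predicting a 960-wide frame):

#include <stdint.h>
#include <stdio.h>

#define REF_SCALE_SHIFT 14 /* assumed to match the tree's definition */

static int get_fixed_point_scale_factor(int other_size, int this_size) {
  /* Same formula as scale.c: fixed-point ratio of reference to current. */
  return (other_size << REF_SCALE_SHIFT) / this_size;
}

static int scaled_x(int val, int x_scale_fp) {
  return (int)((int64_t)val * x_scale_fp >> REF_SCALE_SHIFT);
}

int main(void) {
  const int x_scale_fp = get_fixed_point_scale_factor(1920, 960); /* 32768 */
  /* Coordinate 100 in the 960-wide frame maps to 200 in the reference. */
  printf("x_scale_fp = %d, scaled_x(100) = %d\n", x_scale_fp,
         scaled_x(100, x_scale_fp));
  /* x_step_q4 = scaled_x(16); anything other than 16 makes av1_is_scaled
     report true, selecting the scaling convolve paths above. */
  printf("x_step_q4 = %d (an unscaled step would be 16)\n",
         scaled_x(16, x_scale_fp));
  return 0;
}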
diff --git a/av1/common/scale.h b/av1/common/scale.h
index bb02601..0b49b68 100644
--- a/av1/common/scale.h
+++ b/av1/common/scale.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_SCALE_H_
-#define VP10_COMMON_SCALE_H_
+#ifndef AV1_COMMON_SCALE_H_
+#define AV1_COMMON_SCALE_H_
#include "av1/common/mv.h"
-#include "aom_dsp/vpx_convolve.h"
+#include "aom_dsp/aom_convolve.h"
#ifdef __cplusplus
extern "C" {
@@ -32,37 +32,37 @@
int (*scale_value_y)(int val, const struct scale_factors *sf);
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_convolve_fn_t highbd_predict[2][2][2]; // horiz, vert, avg
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Functions for non-interpolating filters (those that filter zero offsets)
#if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
convolve_fn_t predict_ni[2][2][2]; // horiz, vert, avg
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_convolve_fn_t highbd_predict_ni[2][2][2]; // horiz, vert, avg
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
};
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
- int other_h, int this_w, int this_h,
- int use_high);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+ int other_h, int this_w, int this_h,
+ int use_high);
#else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
- int other_h, int this_w, int this_h);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+ int other_h, int this_w, int this_h);
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) {
+static INLINE int av1_is_valid_scale(const struct scale_factors *sf) {
return sf->x_scale_fp != REF_INVALID_SCALE &&
sf->y_scale_fp != REF_INVALID_SCALE;
}
-static INLINE int vp10_is_scaled(const struct scale_factors *sf) {
- return vp10_is_valid_scale(sf) &&
+static INLINE int av1_is_scaled(const struct scale_factors *sf) {
+ return av1_is_valid_scale(sf) &&
(sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
}
@@ -76,4 +76,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_SCALE_H_
+#endif // AV1_COMMON_SCALE_H_
diff --git a/av1/common/scan.c b/av1/common/scan.c
index dbc36eb..8fc4ca2 100644
--- a/av1/common/scan.c
+++ b/av1/common/scan.c
@@ -2817,69 +2817,69 @@
};
#endif // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x4[16]) = {
0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15,
};
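These *_iscan_* tables are inverse scans: entry p is the coding-order index of coefficient position p, so the forward scan satisfies scan[iscan[p]] == p. A standalone check that reconstructs the forward zig-zag order from av1_default_iscan_4x4 above:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Copied from av1_default_iscan_4x4 above. */
  static const int16_t iscan[16] = { 0, 2, 5, 8,  1, 3,  9,  12,
                                     4, 7, 11, 14, 6, 10, 13, 15 };
  int16_t scan[16];
  int i;
  for (i = 0; i < 16; ++i) scan[iscan[i]] = (int16_t)i;
  /* scan[] now lists coefficient positions in coding order; the first
     entries walk the top-left zig-zag: 0, 4, 1, 5, 8, 2, ... */
  for (i = 0; i < 16; ++i) printf("%d ", scan[i]);
  printf("\n");
  return 0;
}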
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_4x4[16]) = {
0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_4x4[16]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
};
#endif // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_4x4[16]) = {
0, 3, 7, 11, 1, 5, 9, 12, 2, 6, 10, 14, 4, 8, 13, 15,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_4x4[16]) = {
0, 1, 3, 5, 2, 4, 6, 9, 7, 8, 11, 13, 10, 12, 14, 15,
};
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x8[32]) = {
0, 1, 4, 9, 2, 3, 6, 11, 5, 7, 8, 13, 10, 12, 14, 17,
15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_4x8[32]) = {
0, 8, 16, 24, 1, 9, 17, 25, 2, 10, 18, 26, 3, 11, 19, 27,
4, 12, 20, 28, 5, 13, 21, 29, 6, 14, 22, 30, 7, 15, 23, 31,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_4x8[32]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x4[32]) = {
0, 1, 4, 9, 15, 19, 24, 28, 2, 3, 6, 11, 16, 21, 25, 29,
5, 7, 8, 13, 18, 22, 26, 30, 10, 12, 14, 17, 20, 23, 27, 31,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x4[32]) = {
0, 4, 8, 12, 16, 20, 24, 28, 1, 5, 9, 13, 17, 21, 25, 29,
2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 31,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x4[32]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
};
#endif // CONFIG_EXT_TX
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x8[64]) = {
0, 8, 16, 24, 32, 40, 48, 56, 1, 9, 17, 25, 33, 41, 49, 57,
2, 10, 18, 26, 34, 42, 50, 58, 3, 11, 19, 27, 35, 43, 51, 59,
4, 12, 20, 28, 36, 44, 52, 60, 5, 13, 21, 29, 37, 45, 53, 61,
6, 14, 22, 30, 38, 46, 54, 62, 7, 15, 23, 31, 39, 47, 55, 63,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x8[64]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
@@ -2887,21 +2887,21 @@
};
#endif // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_8x8[64]) = {
0, 3, 8, 15, 22, 32, 40, 47, 1, 5, 11, 18, 26, 34, 44, 51,
2, 7, 13, 20, 28, 38, 46, 54, 4, 10, 16, 24, 31, 41, 50, 56,
6, 12, 21, 27, 35, 43, 52, 58, 9, 17, 25, 33, 39, 48, 55, 60,
14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_8x8[64]) = {
0, 1, 2, 5, 8, 12, 19, 24, 3, 4, 7, 10, 15, 20, 30, 39,
6, 9, 13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59,
32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x8[64]) = {
0, 2, 5, 9, 14, 22, 31, 37, 1, 4, 8, 13, 19, 26, 38, 44,
3, 6, 10, 17, 24, 30, 42, 49, 7, 11, 15, 21, 29, 36, 47, 53,
12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60,
@@ -2909,7 +2909,7 @@
};
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x16[128]) = {
0, 1, 3, 6, 10, 15, 21, 28, 2, 4, 7, 11, 16, 22, 29, 36,
5, 8, 12, 17, 23, 30, 37, 44, 9, 13, 18, 24, 31, 38, 45, 52,
14, 19, 25, 32, 39, 46, 53, 60, 20, 26, 33, 40, 47, 54, 61, 68,
@@ -2920,7 +2920,7 @@
91, 98, 105, 111, 116, 120, 123, 125, 99, 106, 112, 117, 121, 124, 126, 127,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x8[128]) = {
0, 1, 3, 6, 10, 15, 21, 28, 36, 44, 52, 60, 68, 76, 84, 92,
2, 4, 7, 11, 16, 22, 29, 37, 45, 53, 61, 69, 77, 85, 93, 100,
5, 8, 12, 17, 23, 30, 38, 46, 54, 62, 70, 78, 86, 94, 101, 107,
@@ -2931,7 +2931,7 @@
35, 43, 51, 59, 67, 75, 83, 91, 99, 106, 112, 117, 121, 124, 126, 127,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x16[128]) = {
0, 16, 32, 48, 64, 80, 96, 112, 1, 17, 33, 49, 65, 81, 97, 113,
2, 18, 34, 50, 66, 82, 98, 114, 3, 19, 35, 51, 67, 83, 99, 115,
4, 20, 36, 52, 68, 84, 100, 116, 5, 21, 37, 53, 69, 85, 101, 117,
@@ -2942,7 +2942,7 @@
14, 30, 46, 62, 78, 94, 110, 126, 15, 31, 47, 63, 79, 95, 111, 127,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x8[128]) = {
0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120,
1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97, 105, 113, 121,
2, 10, 18, 26, 34, 42, 50, 58, 66, 74, 82, 90, 98, 106, 114, 122,
@@ -2953,7 +2953,7 @@
7, 15, 23, 31, 39, 47, 55, 63, 71, 79, 87, 95, 103, 111, 119, 127,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x16[128]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
@@ -2965,7 +2965,7 @@
120, 121, 122, 123, 124, 125, 126, 127,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x8[128]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
@@ -2977,7 +2977,7 @@
120, 121, 122, 123, 124, 125, 126, 127,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x32[512]) = {
0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105,
120, 2, 4, 7, 11, 16, 22, 29, 37, 46, 56, 67, 79, 92, 106,
121, 136, 5, 8, 12, 17, 23, 30, 38, 47, 57, 68, 80, 93, 107,
@@ -3015,7 +3015,7 @@
510, 511,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x16[512]) = {
0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105,
120, 136, 152, 168, 184, 200, 216, 232, 248, 264, 280, 296, 312, 328, 344,
360, 376, 2, 4, 7, 11, 16, 22, 29, 37, 46, 56, 67, 79, 92,
@@ -3053,7 +3053,7 @@
510, 511,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x32[512]) = {
0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480,
1, 33, 65, 97, 129, 161, 193, 225, 257, 289, 321, 353, 385, 417, 449, 481,
2, 34, 66, 98, 130, 162, 194, 226, 258, 290, 322, 354, 386, 418, 450, 482,
@@ -3088,7 +3088,7 @@
31, 63, 95, 127, 159, 191, 223, 255, 287, 319, 351, 383, 415, 447, 479, 511,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_32x16[512]) = {
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
240, 256, 272, 288, 304, 320, 336, 352, 368, 384, 400, 416, 432, 448, 464,
480, 496, 1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193,
@@ -3126,7 +3126,7 @@
495, 511,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x32[512]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
@@ -3164,7 +3164,7 @@
510, 511,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_32x16[512]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
@@ -3205,7 +3205,7 @@
#endif // CONFIG_EXT_TX
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x16[256]) = {
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240,
1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209, 225, 241,
2, 18, 34, 50, 66, 82, 98, 114, 130, 146, 162, 178, 194, 210, 226, 242,
@@ -3224,7 +3224,7 @@
15, 31, 47, 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x16[256]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
@@ -3246,7 +3246,7 @@
};
#endif // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_16x16[256]) = {
0, 4, 11, 20, 31, 43, 59, 75, 85, 109, 130, 150, 165, 181, 195, 198,
1, 6, 14, 23, 34, 47, 64, 81, 95, 114, 135, 153, 171, 188, 201, 212,
2, 8, 16, 25, 38, 52, 67, 83, 101, 116, 136, 157, 172, 190, 205, 216,
@@ -3265,7 +3265,7 @@
65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_16x16[256]) = {
0, 1, 2, 4, 6, 9, 12, 17, 22, 29, 36, 43, 54, 64, 76,
86, 3, 5, 7, 11, 15, 19, 25, 32, 38, 48, 59, 68, 84, 99,
115, 130, 8, 10, 13, 18, 23, 27, 33, 42, 51, 60, 72, 88, 103,
@@ -3286,7 +3286,7 @@
255,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x16[256]) = {
0, 2, 5, 9, 17, 24, 36, 44, 55, 72, 88, 104, 128, 143, 166,
179, 1, 4, 8, 13, 20, 30, 40, 54, 66, 79, 96, 113, 141, 154,
178, 196, 3, 7, 11, 18, 25, 33, 46, 57, 71, 86, 101, 119, 148,
@@ -3308,7 +3308,7 @@
};
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_32x32[1024]) = {
0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416,
448, 480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800, 832, 864,
896, 928, 960, 992, 1, 33, 65, 97, 129, 161, 193, 225, 257, 289,
@@ -3385,7 +3385,7 @@
991, 1023,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_32x32[1024]) = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
@@ -3468,7 +3468,7 @@
};
#endif // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x32[1024]) = {
0, 2, 5, 10, 17, 25, 38, 47, 62, 83, 101, 121, 145,
170, 193, 204, 210, 219, 229, 233, 245, 257, 275, 299, 342, 356,
377, 405, 455, 471, 495, 527, 1, 4, 8, 15, 22, 30, 45,
@@ -3551,7 +3551,7 @@
};
#if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_v2_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_v2_iscan_32x32[1024]) = {
0, 1, 4, 9, 15, 22, 33, 43, 56, 71, 86, 104, 121,
142, 166, 189, 512, 518, 527, 539, 551, 566, 584, 602, 621, 644,
668, 695, 721, 748, 780, 811, 2, 3, 6, 11, 17, 26, 35,
@@ -3633,7 +3633,7 @@
978, 987, 995, 1002, 1008, 1013, 1017, 1020, 1022, 1023,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_h2_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_h2_iscan_32x32[1024]) = {
0, 1, 4, 9, 15, 22, 33, 43, 56, 71, 86, 104, 121,
142, 166, 189, 214, 233, 254, 273, 292, 309, 328, 345, 362, 378,
397, 415, 431, 447, 464, 481, 2, 3, 6, 11, 17, 26, 35,
@@ -3715,7 +3715,7 @@
978, 987, 995, 1002, 1008, 1013, 1017, 1020, 1022, 1023,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_qtr_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_qtr_iscan_32x32[1024]) = {
0, 1, 4, 9, 15, 22, 33, 43, 56, 71, 86, 104, 121,
142, 166, 189, 256, 268, 286, 310, 334, 364, 400, 435, 471, 510,
553, 598, 640, 683, 732, 780, 2, 3, 6, 11, 17, 26, 35,
@@ -3798,371 +3798,369 @@
};
#endif // CONFIG_EXT_TX
-const scan_order vp10_default_scan_orders[TX_SIZES] = {
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
- default_scan_16x16_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
- default_scan_32x32_neighbors },
+const scan_order av1_default_scan_orders[TX_SIZES] = {
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_16x16, av1_default_iscan_16x16, default_scan_16x16_neighbors },
+ { default_scan_32x32, av1_default_iscan_32x32, default_scan_32x32_neighbors },
};
#if CONFIG_EXT_TX
-const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES] = {
{
// TX_4X4
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
- { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
- { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
- { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
- { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
- { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
- { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
- { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+ { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+ { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+ { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+ { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+ { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+ { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+ { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
},
{
// TX_8X8
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
- { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
- { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
- { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
- { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
- { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
- { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
- { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+ { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+ { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+ { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+ { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+ { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+ { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+ { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
},
{
// TX_16X16
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
- { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+ { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
- { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
- { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
- { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
- { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
- { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
- { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
+ { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+ { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+ { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+ { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+ { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+ { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+ { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
},
{
// TX_32X32
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
- { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
- { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+ { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+ { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+ { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
}
};
-const scan_order vp10_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
+const scan_order av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
{
// TX_4X4
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
- { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
- { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
- { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
- { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
- { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
- { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+ { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+ { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+ { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+ { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+ { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+ { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
},
{
// TX_8X8
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
- { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
- { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
- { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
- { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
- { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
- { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+ { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+ { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+ { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+ { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+ { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+ { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
},
{
// TX_16X16
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
- { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
- { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
- { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
- { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
- { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
- { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+ { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+ { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+ { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+ { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+ { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+ { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+ { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
},
{
// TX_32X32
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
- { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
- { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
- { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
- { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+ { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+ { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+ { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+ { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+ { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
},
{
// TX_4X8
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
- { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
- { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
- { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
- { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
- { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
- { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
- { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+ { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+ { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+ { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+ { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+ { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+ { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+ { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
},
{
// TX_8X4
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
- { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
- { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
- { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
- { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
- { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
- { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
- { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+ { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+ { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+ { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+ { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+ { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+ { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+ { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
},
{
// TX_8X16
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { default_scan_8x16, vp10_default_iscan_8x16,
+ { default_scan_8x16, av1_default_iscan_8x16,
default_scan_8x16_neighbors },
- { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
- { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
- { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
- { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
- { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
- { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
- { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+ { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+ { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+ { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+ { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+ { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+ { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+ { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
},
{
// TX_16X8
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { default_scan_16x8, vp10_default_iscan_16x8,
+ { default_scan_16x8, av1_default_iscan_16x8,
default_scan_16x8_neighbors },
- { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
- { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
- { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
- { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
- { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
- { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
- { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+ { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+ { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+ { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+ { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+ { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+ { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+ { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
},
{
// TX_16X32
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { default_scan_16x32, vp10_default_iscan_16x32,
+ { default_scan_16x32, av1_default_iscan_16x32,
default_scan_16x32_neighbors },
- { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
- { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
- { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
- { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
- { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
- { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
- { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+ { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+ { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+ { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+ { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+ { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+ { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+ { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
},
{
// TX_32X16
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { default_scan_32x16, vp10_default_iscan_32x16,
+ { default_scan_32x16, av1_default_iscan_32x16,
default_scan_32x16_neighbors },
- { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
- { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
- { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
- { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
- { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
- { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
- { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+ { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+ { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+ { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+ { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+ { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+ { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+ { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
}
};
#else // CONFIG_EXT_TX
-const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES] = {
{ // TX_4X4
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
- { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors } },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+ { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors } },
{ // TX_8X8
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
- { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors } },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+ { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors } },
{ // TX_16X16
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
- { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+ { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors } },
{
// TX_32X32
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
}
};
diff --git a/av1/common/scan.h b/av1/common/scan.h
index d2d9f35..cba92e7 100644
--- a/av1/common/scan.h
+++ b/av1/common/scan.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_SCAN_H_
-#define VP10_COMMON_SCAN_H_
+#ifndef AV1_COMMON_SCAN_H_
+#define AV1_COMMON_SCAN_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#include "av1/common/enums.h"
@@ -29,8 +29,8 @@
const int16_t *neighbors;
} scan_order;
-extern const scan_order vp10_default_scan_orders[TX_SIZES];
-extern const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES];
+extern const scan_order av1_default_scan_orders[TX_SIZES];
+extern const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES];
static INLINE int get_coef_context(const int16_t *neighbors,
const uint8_t *token_cache, int c) {
@@ -41,26 +41,26 @@
static INLINE const scan_order *get_intra_scan(TX_SIZE tx_size,
TX_TYPE tx_type) {
- return &vp10_intra_scan_orders[tx_size][tx_type];
+ return &av1_intra_scan_orders[tx_size][tx_type];
}
#if CONFIG_EXT_TX
-extern const scan_order vp10_inter_scan_orders[TX_SIZES_ALL][TX_TYPES];
+extern const scan_order av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES];
static INLINE const scan_order *get_inter_scan(TX_SIZE tx_size,
TX_TYPE tx_type) {
- return &vp10_inter_scan_orders[tx_size][tx_type];
+ return &av1_inter_scan_orders[tx_size][tx_type];
}
#endif // CONFIG_EXT_TX
static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type,
int is_inter) {
#if CONFIG_EXT_TX
- return is_inter ? &vp10_inter_scan_orders[tx_size][tx_type]
- : &vp10_intra_scan_orders[tx_size][tx_type];
+ return is_inter ? &av1_inter_scan_orders[tx_size][tx_type]
+ : &av1_intra_scan_orders[tx_size][tx_type];
#else
(void)is_inter;
- return &vp10_intra_scan_orders[tx_size][tx_type];
+ return &av1_intra_scan_orders[tx_size][tx_type];
#endif // CONFIG_EXT_TX
}
@@ -68,4 +68,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_SCAN_H_
+#endif // AV1_COMMON_SCAN_H_
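
The header keeps get_coef_context(), of which only the signature is visible in this hunk. As a hedged reconstruction of the VP9/VP10 behavior (MAX_NEIGHBORS and the rounding are assumptions carried over from libvpx, not confirmed by this diff): the context of the c-th scanned coefficient is the rounded mean of the token-cache values of its two causal neighbors in scan order:

#include <stdint.h>
#include <stdio.h>

#define MAX_NEIGHBORS 2 /* assumption: two causal neighbors per position */

/* Hedged reconstruction of the elided body. */
static int get_coef_context(const int16_t *neighbors,
                            const uint8_t *token_cache, int c) {
  return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
          token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >>
         1;
}

int main(void) {
  /* Toy data: coefficient 1's neighbors are raster positions 0 and 2. */
  const int16_t neighbors[] = { 0, 0, 0, 2 };
  const uint8_t token_cache[] = { 3, 0, 1 };
  printf("ctx: %d\n", get_coef_context(neighbors, token_cache, 1));
  /* (1 + 3 + 1) >> 1 == 2 */
  return 0;
}
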
diff --git a/av1/common/seg_common.c b/av1/common/seg_common.c
index f131c7b..9a5b8c8 100644
--- a/av1/common/seg_common.c
+++ b/av1/common/seg_common.c
@@ -25,26 +25,26 @@
// the coding mechanism is still subject to change so these provide a
// convenient single point of change.
-void vp10_clearall_segfeatures(struct segmentation *seg) {
- vp10_zero(seg->feature_data);
- vp10_zero(seg->feature_mask);
+void av1_clearall_segfeatures(struct segmentation *seg) {
+ av1_zero(seg->feature_data);
+ av1_zero(seg->feature_mask);
}
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id) {
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
seg->feature_mask[segment_id] |= 1 << feature_id;
}
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
return seg_feature_data_max[feature_id];
}
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
return seg_feature_data_signed[feature_id];
}
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id, int seg_data) {
+void av1_set_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id, int seg_data) {
assert(seg_data <= seg_feature_data_max[feature_id]);
if (seg_data < 0) {
assert(seg_feature_data_signed[feature_id]);
@@ -54,7 +54,7 @@
seg->feature_data[segment_id][feature_id] = seg_data;
}
-const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
+const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
};
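
The segmentation helpers renamed above all reduce to bit operations on a per-segment feature mask: enabling a feature sets bit (1 << feature_id), and segfeature_active() (from seg_common.h, below) tests it. A runnable sketch with the struct pared down to the fields these functions touch — the real SEG_LVL_FEATURES enum has more entries:

#include <stdio.h>

#define MAX_SEGMENTS 8
typedef enum { SEG_LVL_ALT_Q, SEG_LVL_ALT_LF, SEG_LVL_MAX } SEG_LVL_FEATURES;

struct segmentation {
  int enabled;
  unsigned feature_mask[MAX_SEGMENTS]; /* one bit per feature */
};

static void av1_enable_segfeature(struct segmentation *seg, int segment_id,
                                  SEG_LVL_FEATURES feature_id) {
  seg->feature_mask[segment_id] |= 1u << feature_id;
}

static int segfeature_active(const struct segmentation *seg, int segment_id,
                             SEG_LVL_FEATURES feature_id) {
  return seg->enabled && (seg->feature_mask[segment_id] & (1u << feature_id));
}

int main(void) {
  struct segmentation seg = { 0 };
  seg.enabled = 1;
  av1_enable_segfeature(&seg, 3, SEG_LVL_ALT_LF);
  printf("seg 3 ALT_LF: %d\n", segfeature_active(&seg, 3, SEG_LVL_ALT_LF));
  printf("seg 3 ALT_Q:  %d\n", segfeature_active(&seg, 3, SEG_LVL_ALT_Q));
  return 0; /* prints 1, then 0 */
}
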
diff --git a/av1/common/seg_common.h b/av1/common/seg_common.h
index 7a8fa8f..f863ad8 100644
--- a/av1/common/seg_common.h
+++ b/av1/common/seg_common.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_SEG_COMMON_H_
-#define VP10_COMMON_SEG_COMMON_H_
+#ifndef AV1_COMMON_SEG_COMMON_H_
+#define AV1_COMMON_SEG_COMMON_H_
#include "aom_dsp/prob.h"
@@ -46,8 +46,8 @@
};
struct segmentation_probs {
- vpx_prob tree_probs[SEG_TREE_PROBS];
- vpx_prob pred_probs[PREDICTION_PROBS];
+ aom_prob tree_probs[SEG_TREE_PROBS];
+ aom_prob pred_probs[PREDICTION_PROBS];
};
static INLINE int segfeature_active(const struct segmentation *seg,
@@ -56,27 +56,27 @@
return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id));
}
-void vp10_clearall_segfeatures(struct segmentation *seg);
+void av1_clearall_segfeatures(struct segmentation *seg);
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id);
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id);
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id, int seg_data);
+void av1_set_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id, int seg_data);
static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
return seg->feature_data[segment_id][feature_id];
}
-extern const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
+extern const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_SEG_COMMON_H_
+#endif // AV1_COMMON_SEG_COMMON_H_
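
The av1_segment_tree exported above follows the aom_tree_index convention inherited from libvpx: a nonnegative entry is the offset of the next node pair, and a negative entry (or 0) is the negated leaf value. A hedged sketch that walks the tree from a precomputed bit sequence — read_segment_id() and its bits array are stand-ins for the entropy decoder:

#include <stdio.h>

typedef int aom_tree_index;
#define TREE_SIZE(n) (2 * (n)-2)
#define MAX_SEGMENTS 8

static const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
  2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
};

static int read_segment_id(const aom_tree_index *tree, const int *bits,
                           int *pos) {
  aom_tree_index i = 0;
  /* Descend while entries are positive; 0 and negatives are leaves
   * (segment 0's leaf is stored as 0, since -0 == 0). */
  while ((i = tree[i + bits[(*pos)++]]) > 0) {
  }
  return -i;
}

int main(void) {
  int bits[] = { 1, 0, 1 }; /* right, left, right */
  int pos = 0;
  printf("segment id: %d\n", read_segment_id(av1_segment_tree, bits, &pos));
  return 0; /* prints 5 */
}
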
diff --git a/av1/common/thread_common.c b/av1/common/thread_common.c
index ba91a46..13150e0 100644
--- a/av1/common/thread_common.c
+++ b/av1/common/thread_common.c
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/entropymode.h"
#include "av1/common/thread_common.h"
#include "av1/common/reconinter.h"
@@ -33,7 +33,7 @@
}
#endif // CONFIG_MULTITHREAD
-static INLINE void sync_read(VP10LfSync *const lf_sync, int r, int c) {
+static INLINE void sync_read(AV1LfSync *const lf_sync, int r, int c) {
#if CONFIG_MULTITHREAD
const int nsync = lf_sync->sync_range;
@@ -53,7 +53,7 @@
#endif // CONFIG_MULTITHREAD
}
-static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
+static INLINE void sync_write(AV1LfSync *const lf_sync, int r, int c,
const int sb_cols) {
#if CONFIG_MULTITHREAD
const int nsync = lf_sync->sync_range;
@@ -86,9 +86,9 @@
// Implement row loopfiltering for each thread.
static INLINE void thread_loop_filter_rows(
- const YV12_BUFFER_CONFIG *const frame_buffer, VP10_COMMON *const cm,
+ const YV12_BUFFER_CONFIG *const frame_buffer, AV1_COMMON *const cm,
struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
- int y_only, VP10LfSync *const lf_sync) {
+ int y_only, AV1LfSync *const lf_sync) {
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
const int sb_cols = mi_cols_aligned_to_sb(cm) >> cm->mib_size_log2;
int mi_row, mi_col;
@@ -123,28 +123,28 @@
sync_read(lf_sync, r, c);
- vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+ av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
#if CONFIG_EXT_PARTITION_TYPES
for (plane = 0; plane < num_planes; ++plane)
- vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
- mi_col);
+ av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
+ mi_col);
#else
// TODO(JBB): Make setup_mask work for non 420.
- vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+ av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
- vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
for (plane = 1; plane < num_planes; ++plane) {
switch (path) {
case LF_PATH_420:
- vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_444:
- vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_SLOW:
- vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
- mi_row, mi_col);
+ av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+ mi_row, mi_col);
break;
}
}
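
sync_read()/sync_write() above implement VP9-style sliding-window row synchronization: superblock row r may filter column c only once row r-1 has published progress at least nsync columns ahead (or has finished). A single-threaded sketch of that invariant — the round-robin scheduler and all values are stand-ins; the real code blocks on a per-row mutex/condvar pair instead of polling:

#include <stdio.h>

int main(void) {
  const int rows = 3, sb_cols = 10, nsync = 2;
  int cur_sb_col[3] = { -1, -1, -1 }; /* progress published per SB row */
  for (int step = 0; step < rows * sb_cols; ++step) {
    for (int r = 0; r < rows; ++r) {
      int c = cur_sb_col[r] + 1;
      if (c >= sb_cols) continue; /* row finished */
      /* sync_read(): wait until the row above is nsync columns ahead. */
      if (r > 0 && cur_sb_col[r - 1] < c + nsync &&
          cur_sb_col[r - 1] != sb_cols - 1)
        continue;
      cur_sb_col[r] = c; /* sync_write(): publish progress */
      printf("step %2d: row %d filtered col %d\n", step, r, c);
    }
  }
  return 0;
}
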
@@ -155,7 +155,7 @@
}
// Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(VP10LfSync *const lf_sync,
+static int loop_filter_row_worker(AV1LfSync *const lf_sync,
LFWorkerData *const lf_data) {
thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
lf_data->start, lf_data->stop, lf_data->y_only,
@@ -163,18 +163,18 @@
return 1;
}
-static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only,
- VPxWorker *workers, int nworkers,
- VP10LfSync *lf_sync) {
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ AVxWorker *workers, int nworkers,
+ AV1LfSync *lf_sync) {
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
// Number of superblock rows and cols
const int sb_rows = mi_rows_aligned_to_sb(cm) >> cm->mib_size_log2;
// Decoder may allocate more threads than number of tiles based on user's
// input.
const int tile_cols = cm->tile_cols;
- const int num_workers = VPXMIN(nworkers, tile_cols);
+ const int num_workers = AOMMIN(nworkers, tile_cols);
int i;
#if CONFIG_EXT_PARTITION
@@ -186,8 +186,8 @@
if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
num_workers > lf_sync->num_workers) {
- vp10_loop_filter_dealloc(lf_sync);
- vp10_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+ av1_loop_filter_dealloc(lf_sync);
+ av1_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
}
// Initialize cur_sb_col to -1 for all SB rows.
@@ -202,15 +202,15 @@
// because of contention. If the multithreading code changes in the future
// then the number of workers used by the loopfilter should be revisited.
for (i = 0; i < num_workers; ++i) {
- VPxWorker *const worker = &workers[i];
+ AVxWorker *const worker = &workers[i];
LFWorkerData *const lf_data = &lf_sync->lfdata[i];
- worker->hook = (VPxWorkerHook)loop_filter_row_worker;
+ worker->hook = (AVxWorkerHook)loop_filter_row_worker;
worker->data1 = lf_sync;
worker->data2 = lf_data;
// Loopfilter data
- vp10_loop_filter_data_reset(lf_data, frame, cm, planes);
+ av1_loop_filter_data_reset(lf_data, frame, cm, planes);
lf_data->start = start + i * cm->mib_size;
lf_data->stop = stop;
lf_data->y_only = y_only;
@@ -229,11 +229,11 @@
}
}
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- int frame_filter_level, int y_only,
- int partial_frame, VPxWorker *workers,
- int num_workers, VP10LfSync *lf_sync) {
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int frame_filter_level, int y_only,
+ int partial_frame, AVxWorker *workers,
+ int num_workers, AV1LfSync *lf_sync) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
if (!frame_filter_level) return;
@@ -243,10 +243,10 @@
if (partial_frame && cm->mi_rows > 8) {
start_mi_row = cm->mi_rows >> 1;
start_mi_row &= 0xfffffff8;
- mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+ mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
}
end_mi_row = start_mi_row + mi_rows_to_filter;
- vp10_loop_filter_frame_init(cm, frame_filter_level);
+ av1_loop_filter_frame_init(cm, frame_filter_level);
loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
workers, num_workers, lf_sync);
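
The partial-frame path above filters only a short, 8-row-aligned band around the middle of the frame. A minimal standalone check of that arithmetic, using a hypothetical frame height of 100 mi rows (all values are illustrative):

```c
#include <assert.h>

int main(void) {
  const int mi_rows = 100; /* hypothetical frame height in mi units */
  /* Mirrors the computation above: midpoint, rounded down to a multiple
     of 8, then at least mi_rows / 8 (and never fewer than 8) rows. */
  const int start_mi_row = (mi_rows >> 1) & 0xfffffff8;
  const int mi_rows_to_filter = mi_rows / 8 > 8 ? mi_rows / 8 : 8;
  assert(start_mi_row == 48);                     /* 50 aligned down to 48 */
  assert(start_mi_row + mi_rows_to_filter == 60); /* filters mi rows [48, 60) */
  return 0;
}
```
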
@@ -267,15 +267,15 @@
}
// Allocate memory for lf row synchronization
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
- int width, int num_workers) {
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, AV1_COMMON *cm, int rows,
+ int width, int num_workers) {
lf_sync->rows = rows;
#if CONFIG_MULTITHREAD
{
int i;
CHECK_MEM_ERROR(cm, lf_sync->mutex_,
- vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
+ aom_malloc(sizeof(*lf_sync->mutex_) * rows));
if (lf_sync->mutex_) {
for (i = 0; i < rows; ++i) {
pthread_mutex_init(&lf_sync->mutex_[i], NULL);
@@ -283,7 +283,7 @@
}
CHECK_MEM_ERROR(cm, lf_sync->cond_,
- vpx_malloc(sizeof(*lf_sync->cond_) * rows));
+ aom_malloc(sizeof(*lf_sync->cond_) * rows));
if (lf_sync->cond_) {
for (i = 0; i < rows; ++i) {
pthread_cond_init(&lf_sync->cond_[i], NULL);
@@ -293,18 +293,18 @@
#endif // CONFIG_MULTITHREAD
CHECK_MEM_ERROR(cm, lf_sync->lfdata,
- vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
+ aom_malloc(num_workers * sizeof(*lf_sync->lfdata)));
lf_sync->num_workers = num_workers;
CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
- vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
+ aom_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
// Set up nsync.
lf_sync->sync_range = get_sync_range(width);
}
// Deallocate lf synchronization related mutex and data
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync) {
if (lf_sync != NULL) {
#if CONFIG_MULTITHREAD
int i;
@@ -313,26 +313,26 @@
for (i = 0; i < lf_sync->rows; ++i) {
pthread_mutex_destroy(&lf_sync->mutex_[i]);
}
- vpx_free(lf_sync->mutex_);
+ aom_free(lf_sync->mutex_);
}
if (lf_sync->cond_ != NULL) {
for (i = 0; i < lf_sync->rows; ++i) {
pthread_cond_destroy(&lf_sync->cond_[i]);
}
- vpx_free(lf_sync->cond_);
+ aom_free(lf_sync->cond_);
}
#endif // CONFIG_MULTITHREAD
- vpx_free(lf_sync->lfdata);
- vpx_free(lf_sync->cur_sb_col);
+ aom_free(lf_sync->lfdata);
+ aom_free(lf_sync->cur_sb_col);
// clear the structure as the source of this call may be a resize in which
// case this call will be followed by an _alloc() which may fail.
- vp10_zero(*lf_sync);
+ av1_zero(*lf_sync);
}
}
 // Accumulate frame counts. FRAME_COUNTS consists solely of 'unsigned int'
 // members, so we treat it as an array and sum over the whole length.
-void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+void av1_accumulate_frame_counts(AV1_COMMON *cm, FRAME_COUNTS *counts) {
unsigned int *const acc = (unsigned int *)&cm->counts;
const unsigned int *const cnt = (unsigned int *)counts;
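
The two casts above flatten cm->counts and the incoming counts into parallel unsigned int arrays; the summation loop itself falls outside this hunk. A sketch of what the casts set up, assuming (as the comment states) that FRAME_COUNTS contains only unsigned int members:

```c
/* Hedged sketch of the accumulation the casts above imply; assumes
   FRAME_COUNTS holds nothing but unsigned int members, so the struct
   can be walked as a flat array. */
const unsigned int n_counts = sizeof(FRAME_COUNTS) / sizeof(unsigned int);
unsigned int i;
for (i = 0; i < n_counts; ++i) acc[i] += cnt[i];
```
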
diff --git a/av1/common/thread_common.h b/av1/common/thread_common.h
index 3df9557..29085cb 100644
--- a/av1/common/thread_common.h
+++ b/av1/common/thread_common.h
@@ -8,21 +8,21 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_LOOPFILTER_THREAD_H_
-#define VP10_COMMON_LOOPFILTER_THREAD_H_
-#include "./vpx_config.h"
+#ifndef AV1_COMMON_LOOPFILTER_THREAD_H_
+#define AV1_COMMON_LOOPFILTER_THREAD_H_
+#include "./aom_config.h"
#include "av1/common/loopfilter.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_util/aom_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
struct FRAME_COUNTS;
// Loopfilter row synchronization
-typedef struct VP10LfSyncData {
+typedef struct AV1LfSyncData {
#if CONFIG_MULTITHREAD
pthread_mutex_t *mutex_;
pthread_cond_t *cond_;
@@ -37,27 +37,27 @@
// Row-based parallel loopfilter data
LFWorkerData *lfdata;
int num_workers;
-} VP10LfSync;
+} AV1LfSync;
// Allocate memory for loopfilter row synchronization.
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, struct VP10Common *cm,
- int rows, int width, int num_workers);
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, struct AV1Common *cm, int rows,
+ int width, int num_workers);
// Deallocate loopfilter synchronization related mutex and data.
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync);
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync);
// Multi-threaded loopfilter that uses the tile threads.
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- int frame_filter_level, int y_only,
- int partial_frame, VPxWorker *workers,
- int num_workers, VP10LfSync *lf_sync);
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int frame_filter_level, int y_only,
+ int partial_frame, AVxWorker *workers,
+ int num_workers, AV1LfSync *lf_sync);
-void vp10_accumulate_frame_counts(struct VP10Common *cm,
- struct FRAME_COUNTS *counts);
+void av1_accumulate_frame_counts(struct AV1Common *cm,
+ struct FRAME_COUNTS *counts);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_LOOPFILTER_THREAD_H_
+#endif // AV1_COMMON_LOOPFILTER_THREAD_H_
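
AV1LfSync above carries one mutex/condvar pair per superblock row plus a cur_sb_col progress array; sync_range bounds how far a row may run ahead of the row above it. A hedged sketch of the read side of that handshake, matching the sync_read() call in thread_common.c in spirit but not verbatim (it assumes CONFIG_MULTITHREAD and simplifies the early-out conditions):

```c
/* Hedged sketch of the per-row handshake AV1LfSync supports: before
   filtering superblock column c of row r, wait until row r - 1 has
   advanced at least sync_range columns past c. Row 0 has no row above
   it and never waits. Assumes CONFIG_MULTITHREAD. */
static void sync_read_sketch(AV1LfSync *const lf_sync, int r, int c) {
  if (r == 0) return;
  pthread_mutex_lock(&lf_sync->mutex_[r - 1]);
  while (c > lf_sync->cur_sb_col[r - 1] - lf_sync->sync_range)
    pthread_cond_wait(&lf_sync->cond_[r - 1], &lf_sync->mutex_[r - 1]);
  pthread_mutex_unlock(&lf_sync->mutex_[r - 1]);
}
```
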
diff --git a/av1/common/tile_common.c b/av1/common/tile_common.c
index e79734e..220cad9 100644
--- a/av1/common/tile_common.c
+++ b/av1/common/tile_common.c
@@ -10,21 +10,21 @@
#include "av1/common/tile_common.h"
#include "av1/common/onyxc_int.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
-void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) {
+void av1_tile_set_row(TileInfo *tile, const AV1_COMMON *cm, int row) {
tile->mi_row_start = row * cm->tile_height;
- tile->mi_row_end = VPXMIN(tile->mi_row_start + cm->tile_height, cm->mi_rows);
+ tile->mi_row_end = AOMMIN(tile->mi_row_start + cm->tile_height, cm->mi_rows);
}
-void vp10_tile_set_col(TileInfo *tile, const VP10_COMMON *cm, int col) {
+void av1_tile_set_col(TileInfo *tile, const AV1_COMMON *cm, int col) {
tile->mi_col_start = col * cm->tile_width;
- tile->mi_col_end = VPXMIN(tile->mi_col_start + cm->tile_width, cm->mi_cols);
+ tile->mi_col_end = AOMMIN(tile->mi_col_start + cm->tile_width, cm->mi_cols);
}
-void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) {
- vp10_tile_set_row(tile, cm, row);
- vp10_tile_set_col(tile, cm, col);
+void av1_tile_init(TileInfo *tile, const AV1_COMMON *cm, int row, int col) {
+ av1_tile_set_row(tile, cm, row);
+ av1_tile_set_col(tile, cm, col);
}
#if !CONFIG_EXT_TILE
@@ -49,8 +49,8 @@
return max_log2 - 1;
}
-void vp10_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
- int *max_log2_tile_cols) {
+void av1_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
+ int *max_log2_tile_cols) {
const int max_sb_cols =
ALIGN_POWER_OF_TWO(mi_cols, MAX_MIB_SIZE_LOG2) >> MAX_MIB_SIZE_LOG2;
*min_log2_tile_cols = get_min_log2_tile_cols(max_sb_cols);
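
The tile setters above are plain interval arithmetic: start = index * tile size, end clamped to the frame edge by AOMMIN. A standalone check with hypothetical dimensions (tile width 64 mi, 150 mi columns), plus the superblock-column rounding used by av1_get_tile_n_bits, assuming aom_dsp's usual ALIGN_POWER_OF_TWO definition and 8-mi (64-pixel) superblocks:

```c
#include <assert.h>

/* ALIGN_POWER_OF_TWO as conventionally defined in aom_dsp: round v up
   to a multiple of 2^n. */
#define ALIGN_POWER_OF_TWO(v, n) (((v) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

int main(void) {
  /* Hypothetical frame: 150 mi columns, tile_width = 64 mi. Tile column 2
     then covers mi columns [128, 150): the end is clamped at mi_cols. */
  const int mi_cols = 150, tile_width = 64, col = 2;
  const int start = col * tile_width;
  const int end = start + tile_width < mi_cols ? start + tile_width : mi_cols;
  assert(start == 128 && end == 150);

  /* Superblock-column count as in av1_get_tile_n_bits, assuming
     MAX_MIB_SIZE_LOG2 == 3 (64x64 superblocks = 8 mi units). */
  assert((ALIGN_POWER_OF_TWO(100, 3) >> 3) == 13); /* 100 mi -> 13 SB cols */
  return 0;
}
```
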
diff --git a/av1/common/tile_common.h b/av1/common/tile_common.h
index a502173..68d434a 100644
--- a/av1/common/tile_common.h
+++ b/av1/common/tile_common.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_TILE_COMMON_H_
-#define VP10_COMMON_TILE_COMMON_H_
+#ifndef AV1_COMMON_TILE_COMMON_H_
+#define AV1_COMMON_TILE_COMMON_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
typedef struct TileInfo {
int mi_row_start, mi_row_end;
@@ -24,17 +24,17 @@
// initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on
// 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)'
-void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, int row,
- int col);
+void av1_tile_init(TileInfo *tile, const struct AV1Common *cm, int row,
+ int col);
-void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row);
-void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col);
+void av1_tile_set_row(TileInfo *tile, const struct AV1Common *cm, int row);
+void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
-void vp10_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
- int *max_log2_tile_cols);
+void av1_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
+ int *max_log2_tile_cols);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_TILE_COMMON_H_
+#endif // AV1_COMMON_TILE_COMMON_H_
diff --git a/av1/common/vp10_convolve.h b/av1/common/vp10_convolve.h
deleted file mode 100644
index 9343402..0000000
--- a/av1/common/vp10_convolve.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef VP10_COMMON_VP10_CONVOLVE_H_
-#define VP10_COMMON_VP10_CONVOLVE_H_
-#include "av1/common/filter.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
-#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
-#else
- const INTERP_FILTER interp_filter,
-#endif
- const int subpel_x, int xstep, const int subpel_y, int ystep,
- int avg);
-
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
-#if CONFIG_DUAL_FILTER
- const INTERP_FILTER *interp_filter,
-#else
- const INTERP_FILTER interp_filter,
-#endif
- const int subpel_x, int xstep, const int subpel_y,
- int ystep, int avg, int bd);
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // VP10_COMMON_VP10_CONVOLVE_H_
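
The prototypes in the deleted header take a start phase (subpel_x/subpel_y) and a per-pixel step (xstep/ystep) in 1/16-pel units. A hedged sketch of the horizontal walk that signature implies; this is not the library's code, and the 8-tap width, 16 phases, and 7-bit filter precision are the usual libvpx conventions assumed here:

```c
#include <stdint.h>

/* Hedged sketch (not av1's implementation) of the subpel walk implied by
   the deleted vp10_convolve prototype: the low 4 bits of x_q4 select the
   fractional-phase filter, the high bits the integer source column, and
   xstep advances the 1/16-pel position per output pixel. */
static void convolve_horiz_sketch(const uint8_t *src, uint8_t *dst, int w,
                                  const int16_t filters[16][8], int subpel_x,
                                  int xstep) {
  int x_q4 = subpel_x;
  for (int x = 0; x < w; ++x) {
    const uint8_t *const s = &src[x_q4 >> 4];    /* integer position */
    const int16_t *const f = filters[x_q4 & 15]; /* 1 of 16 phases */
    int sum = 0;
    for (int k = 0; k < 8; ++k) sum += s[k] * f[k]; /* 8-tap FIR */
    sum = (sum + 64) >> 7; /* round at 7-bit filter precision */
    dst[x] = (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
    x_q4 += xstep;
  }
}
```
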
diff --git a/av1/common/vp10_fwd_txfm1d.h b/av1/common/vp10_fwd_txfm1d.h
deleted file mode 100644
index ab9d2ee..0000000
--- a/av1/common/vp10_fwd_txfm1d.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP10_FWD_TXFM1D_H_
-#define VP10_FWD_TXFM1D_H_
-
-#include "av1/common/vp10_txfm.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_fdct4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct64_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_fadst4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // VP10_FWD_TXFM1D_H_
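
Each 1-D transform above is driven by two per-stage tables: cos_bit gives the bit width of the cosine constants at each butterfly stage, and stage_range the permitted intermediate value range. A hypothetical call shape for the deleted API; the table values below are illustrative only, not the library's real configuration:

```c
#include <stdint.h>

/* Prototype as declared in the deleted header; typedefs kept minimal. */
void vp10_fdct4_new(const int32_t *input, int32_t *output,
                    const int8_t *cos_bit, const int8_t *stage_range);

static void fdct4_usage_sketch(void) {
  int32_t in[4] = { 64, 64, 64, 64 }, out[4];
  const int8_t cos_bit[4] = { 13, 13, 13, 13 };     /* assumed widths */
  const int8_t stage_range[4] = { 14, 15, 16, 16 }; /* assumed ranges */
  vp10_fdct4_new(in, out, cos_bit, stage_range);    /* 4-point forward DCT */
}
```
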
diff --git a/av1/common/vp10_inv_txfm1d.h b/av1/common/vp10_inv_txfm1d.h
deleted file mode 100644
index 21b80bf..0000000
--- a/av1/common/vp10_inv_txfm1d.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP10_INV_TXFM1D_H_
-#define VP10_INV_TXFM1D_H_
-
-#include "av1/common/vp10_txfm.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_idct4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct64_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_iadst4_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst8_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst16_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst32_new(const int32_t *input, int32_t *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // VP10_INV_TXFM1D_H_
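
The vp10_rtcd_defs.pl file removed below feeds the run-time CPU dispatch generator: each add_proto line declares a function signature and each specialize line lists the ISA variants that may replace the plain C version. A hedged sketch of the kind of header the generator emits for one entry; the names and mechanism follow the usual libvpx rtcd scheme and are assumptions, not taken from this patch:

```c
#include <stdint.h>

typedef int16_t tran_low_t; /* low-bitdepth typedef, as in non-HBD builds */

/* Hedged sketch of generated rtcd output for one add_proto/specialize
   pair, e.g. "add_proto qw/void vp10_fht4x4/ ..." with
   "specialize qw/vp10_fht4x4 sse2/": a C fallback, an SSE2 variant, and
   a function pointer patched once at setup time. */
void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
                   int tx_type);
void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
                      int tx_type);
extern void (*vp10_fht4x4)(const int16_t *input, tran_low_t *output,
                           int stride, int tx_type);

/* Hypothetical setup: pick the best variant the CPU supports. */
static void setup_rtcd_sketch(int has_sse2) {
  vp10_fht4x4 = has_sse2 ? vp10_fht4x4_sse2 : vp10_fht4x4_c;
}
```
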
diff --git a/av1/common/vp10_rtcd_defs.pl b/av1/common/vp10_rtcd_defs.pl
deleted file mode 100644
index 4a16723..0000000
--- a/av1/common/vp10_rtcd_defs.pl
+++ /dev/null
@@ -1,912 +0,0 @@
-sub vp10_common_forward_decls() {
-print <<EOF
-/*
- * VP10
- */
-
-#include "aom/vpx_integer.h"
-#include "av1/common/common.h"
-#include "av1/common/enums.h"
-#include "av1/common/quant_common.h"
-#include "av1/common/filter.h"
-#include "av1/common/vp10_txfm.h"
-
-struct macroblockd;
-
-/* Encoder forward decls */
-struct macroblock;
-struct vpx_variance_vtable;
-struct search_site_config;
-struct mv;
-union int_mv;
-struct yv12_buffer_config;
-EOF
-}
-forward_decls qw/vp10_common_forward_decls/;
-
-# functions that are 64 bit only.
-$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
-if ($opts{arch} eq "x86_64") {
- $mmx_x86_64 = 'mmx';
- $sse2_x86_64 = 'sse2';
- $ssse3_x86_64 = 'ssse3';
- $avx_x86_64 = 'avx';
- $avx2_x86_64 = 'avx2';
-}
-
-#
-# 10/12-tap convolution filters
-#
-add_proto qw/void vp10_convolve_horiz/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
-specialize qw/vp10_convolve_horiz ssse3/;
-
-add_proto qw/void vp10_convolve_vert/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
-specialize qw/vp10_convolve_vert ssse3/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vp10_highbd_convolve_horiz/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
- specialize qw/vp10_highbd_convolve_horiz sse4_1/;
- add_proto qw/void vp10_highbd_convolve_vert/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
- specialize qw/vp10_highbd_convolve_vert sse4_1/;
-}
-
-#
-# dct
-#
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- # Note as optimized versions of these functions are added we need to add a check to ensure
- # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
- if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add/;
-
- add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x8_32_add/;
-
- add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x4_32_add/;
-
- add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x16_128_add/;
-
- add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x8_128_add/;
-
- add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x32_512_add/;
-
- add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht32x16_512_add/;
-
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add/;
-
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add/;
-
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4/;
-
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1/;
-
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8/;
-
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1/;
-
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16/;
-
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1/;
-
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32/;
-
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd/;
-
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1/;
-
- add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct4x4/;
-
- add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8/;
-
- add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8_1/;
-
- add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16/;
-
- add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16_1/;
-
- add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32/;
-
- add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_rd/;
-
- add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_1/;
- } else {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add sse2/;
-
- add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x8_32_add/;
-
- add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x4_32_add/;
-
- add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x16_128_add/;
-
- add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x8_128_add/;
-
- add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x32_512_add/;
-
- add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht32x16_512_add/;
-
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add sse2/;
-
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add sse2/;
-
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4 sse2/;
-
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1 sse2/;
-
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8 sse2/;
-
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1 sse2/;
-
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16 sse2/;
-
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1 sse2/;
-
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32 sse2/;
-
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd sse2/;
-
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1 sse2/;
-
- add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct4x4 sse2/;
-
- add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8 sse2/;
-
- add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8_1/;
-
- add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16 sse2/;
-
- add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16_1/;
-
- add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32 sse2/;
-
- add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_rd sse2/;
-
- add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_1/;
- }
-} else {
- # Force C versions if CONFIG_EMULATE_HARDWARE is 1
- if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add/;
-
- add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x8_32_add/;
-
- add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x4_32_add/;
-
- add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x16_128_add/;
-
- add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x8_128_add/;
-
- add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x32_512_add/;
-
- add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht32x16_512_add/;
-
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add/;
-
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add/;
-
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4/;
-
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1/;
-
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8/;
-
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1/;
-
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16/;
-
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1/;
-
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32/;
-
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd/;
-
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1/;
- } else {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add sse2 neon dspr2/;
-
- add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x8_32_add/;
-
- add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x4_32_add/;
-
- add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x16_128_add/;
-
- add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x8_128_add/;
-
- add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht16x32_512_add/;
-
- add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht32x16_512_add/;
-
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add sse2 neon dspr2/;
-
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add sse2 dspr2/;
-
- if (vpx_config("CONFIG_EXT_TX") ne "yes") {
- specialize qw/vp10_iht4x4_16_add msa/;
- specialize qw/vp10_iht8x8_64_add msa/;
- specialize qw/vp10_iht16x16_256_add msa/;
- }
-
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4 sse2/;
-
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1 sse2/;
-
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8 sse2/;
-
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1 sse2/;
-
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16 sse2/;
-
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1 sse2/;
-
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32 sse2/;
-
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd sse2/;
-
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1 sse2/;
- }
-}
-
-if (vpx_config("CONFIG_NEW_QUANT") eq "yes") {
- add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/quantize_nuq/;
-
- add_proto qw/void quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/quantize_fp_nuq/;
-
- add_proto qw/void quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/quantize_32x32_nuq/;
-
- add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/quantize_32x32_fp_nuq/;
-}
-
-# EXT_INTRA predictor functions
-if (vpx_config("CONFIG_EXT_INTRA") eq "yes") {
- add_proto qw/void vp10_dc_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_dc_filter_predictor sse4_1/;
- add_proto qw/void vp10_v_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_v_filter_predictor sse4_1/;
- add_proto qw/void vp10_h_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_h_filter_predictor sse4_1/;
- add_proto qw/void vp10_d45_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_d45_filter_predictor sse4_1/;
- add_proto qw/void vp10_d135_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_d135_filter_predictor sse4_1/;
- add_proto qw/void vp10_d117_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_d117_filter_predictor sse4_1/;
- add_proto qw/void vp10_d153_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_d153_filter_predictor sse4_1/;
- add_proto qw/void vp10_d207_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_d207_filter_predictor sse4_1/;
- add_proto qw/void vp10_d63_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_d63_filter_predictor sse4_1/;
- add_proto qw/void vp10_tm_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
- specialize qw/vp10_tm_filter_predictor sse4_1/;
- # High bitdepth functions
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vp10_highbd_dc_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_dc_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_v_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_v_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_h_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_h_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_d45_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_d45_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_d135_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_d135_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_d117_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_d117_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_d153_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_d153_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_d207_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_d207_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_d63_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_d63_filter_predictor sse4_1/;
- add_proto qw/void vp10_highbd_tm_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vp10_highbd_tm_filter_predictor sse4_1/;
- }
-}
-
-# High bitdepth functions
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- #
- # Sub Pixel Filters
- #
- add_proto qw/void vp10_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve_copy/;
-
- add_proto qw/void vp10_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve_avg/;
-
- add_proto qw/void vp10_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8/, "$sse2_x86_64";
-
- add_proto qw/void vp10_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_horiz/, "$sse2_x86_64";
-
- add_proto qw/void vp10_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_vert/, "$sse2_x86_64";
-
- add_proto qw/void vp10_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_avg/, "$sse2_x86_64";
-
- add_proto qw/void vp10_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
-
- add_proto qw/void vp10_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-
- #
- # dct
- #
- # Note as optimized versions of these functions are added we need to add a check to ensure
- # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
- add_proto qw/void vp10_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht4x4_16_add/;
-
- add_proto qw/void vp10_highbd_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht4x8_32_add/;
-
- add_proto qw/void vp10_highbd_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht8x4_32_add/;
-
- add_proto qw/void vp10_highbd_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht8x16_128_add/;
-
- add_proto qw/void vp10_highbd_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht16x8_128_add/;
-
- add_proto qw/void vp10_highbd_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht16x32_512_add/;
-
- add_proto qw/void vp10_highbd_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht32x16_512_add/;
-
- add_proto qw/void vp10_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht8x8_64_add/;
-
- add_proto qw/void vp10_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
- specialize qw/vp10_highbd_iht16x16_256_add/;
-}
-
-#
-# Encoder functions below this point.
-#
-if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-
-# ENCODEMB INVOKE
-
-if (vpx_config("CONFIG_AOM_QM") eq "yes") {
- if (vpx_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
- # the transform coefficients are held in 32-bit
- # values, so the assembler code for vp10_block_error can no longer be used.
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error/;
-
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- specialize qw/vp10_fdct8x8_quant/;
- } else {
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
-
- add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
- specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
-
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- }
-} else {
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- # the transform coefficients are held in 32-bit
- # values, so the assembler code for vp10_block_error can no longer be used.
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error/;
-
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp/;
-
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp_32x32/;
-
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_fdct8x8_quant/;
- } else {
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error sse2 avx2 msa/;
-
- add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
- specialize qw/vp10_block_error_fp neon sse2/;
-
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp neon sse2/, "$ssse3_x86_64";
-
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp_32x32/, "$ssse3_x86_64";
-
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_fdct8x8_quant sse2 ssse3 neon/;
- }
-
-}
-
-# fdct functions
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht4x4 sse2/;
-
- add_proto qw/void vp10_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht4x8/;
-
- add_proto qw/void vp10_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x4/;
-
- add_proto qw/void vp10_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x16/;
-
- add_proto qw/void vp10_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x8/;
-
- add_proto qw/void vp10_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x32/;
-
- add_proto qw/void vp10_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht32x16/;
-
- add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x8 sse2/;
-
- add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x16 sse2/;
-
- add_proto qw/void vp10_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht32x32/;
-
- add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fwht4x4/;
-} else {
- add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht4x4 sse2/;
-
- add_proto qw/void vp10_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht4x8/;
-
- add_proto qw/void vp10_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x4/;
-
- add_proto qw/void vp10_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x16/;
-
- add_proto qw/void vp10_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x8/;
-
- add_proto qw/void vp10_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x32/;
-
- add_proto qw/void vp10_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht32x16/;
-
- add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x8 sse2/;
-
- add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x16 sse2/;
-
- if (vpx_config("CONFIG_EXT_TX") ne "yes") {
- specialize qw/vp10_fht4x4 msa/;
- specialize qw/vp10_fht8x8 msa/;
- specialize qw/vp10_fht16x16 msa/;
- }
-
- add_proto qw/void vp10_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht32x32/;
-
- add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fwht4x4/;
-}
-
-add_proto qw/void vp10_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
- specialize qw/vp10_fwd_idtx/;
-
-# Inverse transform
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- # Note as optimized versions of these functions are added we need to add a check to ensure
- # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
- add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_1_add/;
-
- add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_16_add/;
-
- add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_1_add/;
-
- add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_64_add/;
-
- add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_12_add/;
-
- add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_1_add/;
-
- add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_256_add/;
-
- add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_10_add/;
-
- add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1024_add/;
-
- add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_34_add/;
-
- add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1_add/;
-
- add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_1_add/;
-
- add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_16_add/;
-
- add_proto qw/void vp10_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct4x4_1_add/;
-
- add_proto qw/void vp10_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_1_add/;
-
- add_proto qw/void vp10_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_1_add/;
-
- add_proto qw/void vp10_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct32x32_1024_add/;
-
- add_proto qw/void vp10_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct32x32_34_add/;
-
- add_proto qw/void vp10_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct32x32_1_add/;
-
- add_proto qw/void vp10_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_iwht4x4_1_add/;
-
- add_proto qw/void vp10_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_iwht4x4_16_add/;
-
- # Force C versions if CONFIG_EMULATE_HARDWARE is 1
- if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct4x4_16_add/;
-
- add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_64_add/;
-
- add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_10_add/;
-
- add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_256_add/;
-
- add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_10_add/;
- } else {
- add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct4x4_16_add sse2/;
-
- add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_64_add sse2/;
-
- add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_10_add sse2/;
-
- add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_256_add sse2/;
-
- add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_10_add sse2/;
- } # CONFIG_EMULATE_HARDWARE
-} else {
- # Force C versions if CONFIG_EMULATE_HARDWARE is 1
- if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_1_add/;
-
- add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_16_add/;
-
- add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_1_add/;
-
- add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_64_add/;
-
- add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_12_add/;
-
- add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_1_add/;
-
- add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_256_add/;
-
- add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_10_add/;
-
- add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1024_add/;
-
- add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_34_add/;
-
- add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1_add/;
-
- add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_1_add/;
-
- add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_16_add/;
- } else {
- add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_1_add sse2/;
-
- add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_16_add sse2/;
-
- add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_1_add sse2/;
-
- add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_64_add sse2/;
-
- add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_12_add sse2/;
-
- add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_1_add sse2/;
-
- add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_256_add sse2/;
-
- add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_10_add sse2/;
-
- add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1024_add sse2/;
-
- add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_34_add sse2/;
-
- add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1_add sse2/;
-
- add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_1_add/;
-
- add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_16_add/;
- } # CONFIG_EMULATE_HARDWARE
-} # CONFIG_VP9_HIGHBITDEPTH
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- #fwd txfm
- add_proto qw/void vp10_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_fwd_txfm2d_4x4 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_fwd_txfm2d_8x8 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_fwd_txfm2d_16x16 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_fwd_txfm2d_32x32 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_fwd_txfm2d_64x64 sse4_1/;
-
- #inv txfm
- add_proto qw/void vp10_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_inv_txfm2d_add_4x4 sse4_1/;
- add_proto qw/void vp10_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_inv_txfm2d_add_8x8 sse4_1/;
- add_proto qw/void vp10_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_inv_txfm2d_add_16x16 sse4_1/;
- add_proto qw/void vp10_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_inv_txfm2d_add_32x32/;
- add_proto qw/void vp10_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- specialize qw/vp10_inv_txfm2d_add_64x64/;
-}
-
-#
-# Motion search
-#
-add_proto qw/int vp10_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
-specialize qw/vp10_full_search_sad sse3 sse4_1/;
-$vp10_full_search_sad_sse3=vp10_full_search_sadx3;
-$vp10_full_search_sad_sse4_1=vp10_full_search_sadx8;
-
-add_proto qw/int vp10_diamond_search_sad/, "struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_diamond_search_sad/;
-
-add_proto qw/int vp10_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_full_range_search/;
-
-add_proto qw/void vp10_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-specialize qw/vp10_temporal_filter_apply sse2 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-
- # ENCODEMB INVOKE
- if (vpx_config("CONFIG_NEW_QUANT") eq "yes") {
- add_proto qw/void highbd_quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/highbd_quantize_nuq/;
-
- add_proto qw/void highbd_quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/highbd_quantize_fp_nuq/;
-
- add_proto qw/void highbd_quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/highbd_quantize_32x32_nuq/;
-
- add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
- specialize qw/highbd_quantize_32x32_fp_nuq/;
- }
-
- add_proto qw/int64_t vp10_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
- specialize qw/vp10_highbd_block_error sse2/;
-
- if (vpx_config("CONFIG_AOM_QM") eq "yes") {
- add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
- add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- } else {
- add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
- specialize qw/vp10_highbd_quantize_fp sse4_1/;
-
- add_proto qw/void vp10_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
- specialize qw/vp10_highbd_quantize_b/;
- }
-
- # fdct functions
- add_proto qw/void vp10_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht4x4 sse4_1/;
-
- add_proto qw/void vp10_highbd_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht4x8/;
-
- add_proto qw/void vp10_highbd_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht8x4/;
-
- add_proto qw/void vp10_highbd_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht8x16/;
-
- add_proto qw/void vp10_highbd_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht16x8/;
-
- add_proto qw/void vp10_highbd_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht16x32/;
-
- add_proto qw/void vp10_highbd_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht32x16/;
-
- add_proto qw/void vp10_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht8x8/;
-
- add_proto qw/void vp10_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht16x16/;
-
- add_proto qw/void vp10_highbd_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht32x32/;
-
- add_proto qw/void vp10_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fwht4x4/;
-
- add_proto qw/void vp10_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
- specialize qw/vp10_highbd_temporal_filter_apply/;
-
-}
-# End vp10_high encoder functions
-
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
- add_proto qw/uint64_t vp10_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
- specialize qw/vp10_wedge_sse_from_residuals sse2/;
- add_proto qw/int vp10_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
- specialize qw/vp10_wedge_sign_from_residuals sse2/;
- add_proto qw/void vp10_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
- specialize qw/vp10_wedge_compute_delta_squares sse2/;
-}
-
-}
-# end encoder functions
-1;
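
The `add_proto`/`specialize` pairs removed above feed the RTCD generator, which emits one function pointer per prototype and binds it at startup to the best specialization the CPU supports; the C version is always the baseline, and a bare `specialize` line means only the C version exists. A minimal hand-written sketch of that dispatch shape, assuming illustrative names (`has_sse2` and `setup_rtcd` are stand-ins, not the generator's real output):

#include <stdint.h>

typedef int16_t tran_low_t; /* int32_t in high-bitdepth builds */

/* The implementations named by "specialize qw/av1_idct4x4_16_add sse2/". */
void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride);
void av1_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride);

/* One RTCD pointer per prototype; callers invoke through it. */
void (*av1_idct4x4_16_add)(const tran_low_t *, uint8_t *, int) =
    av1_idct4x4_16_add_c;

static int has_sse2(void) {
#if defined(__GNUC__)
  return __builtin_cpu_supports("sse2");
#else
  return 0; /* no probe available: stay on the C baseline */
#endif
}

static void setup_rtcd(void) {
  av1_idct4x4_16_add = av1_idct4x4_16_add_c; /* C version is always valid */
  if (has_sse2()) av1_idct4x4_16_add = av1_idct4x4_16_add_sse2;
}
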
diff --git a/av1/common/warped_motion.c b/av1/common/warped_motion.c
index 5f76453..c742c36 100644
--- a/av1/common/warped_motion.c
+++ b/av1/common/warped_motion.c
@@ -353,7 +353,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_get_subcolumn(int taps, uint16_t *ref, int32_t *col,
int stride, int x, int y_start) {
int i;
@@ -522,7 +522,7 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static double warp_erroradv(WarpedMotionParams *wm, uint8_t *ref, int width,
int height, int stride, uint8_t *dst, int p_col,
@@ -574,48 +574,48 @@
}
}
-double vp10_warp_erroradv(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_hbd, int bd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- uint8_t *ref, int width, int height, int stride,
- uint8_t *dst, int p_col, int p_row, int p_width,
- int p_height, int p_stride, int subsampling_x,
- int subsampling_y, int x_scale, int y_scale) {
-#if CONFIG_VP9_HIGHBITDEPTH
+double av1_warp_erroradv(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_hbd, int bd,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ uint8_t *ref, int width, int height, int stride,
+ uint8_t *dst, int p_col, int p_row, int p_width,
+ int p_height, int p_stride, int subsampling_x,
+ int subsampling_y, int x_scale, int y_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
if (use_hbd)
return highbd_warp_erroradv(
wm, ref, width, height, stride, dst, p_col, p_row, p_width, p_height,
p_stride, subsampling_x, subsampling_y, x_scale, y_scale, bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return warp_erroradv(wm, ref, width, height, stride, dst, p_col, p_row,
p_width, p_height, p_stride, subsampling_x,
subsampling_y, x_scale, y_scale);
}
-void vp10_warp_plane(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_hbd, int bd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- uint8_t *ref, int width, int height, int stride,
- uint8_t *pred, int p_col, int p_row, int p_width,
- int p_height, int p_stride, int subsampling_x,
- int subsampling_y, int x_scale, int y_scale) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_warp_plane(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_hbd, int bd,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ uint8_t *ref, int width, int height, int stride,
+ uint8_t *pred, int p_col, int p_row, int p_width,
+ int p_height, int p_stride, int subsampling_x,
+ int subsampling_y, int x_scale, int y_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
if (use_hbd)
highbd_warp_plane(wm, ref, width, height, stride, pred, p_col, p_row,
p_width, p_height, p_stride, subsampling_x, subsampling_y,
x_scale, y_scale, bd);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
warp_plane(wm, ref, width, height, stride, pred, p_col, p_row, p_width,
p_height, p_stride, subsampling_x, subsampling_y, x_scale,
y_scale);
}
-void vp10_integerize_model(const double *model, TransformationType wmtype,
- WarpedMotionParams *wm) {
+void av1_integerize_model(const double *model, TransformationType wmtype,
+ WarpedMotionParams *wm) {
wm->wmtype = wmtype;
switch (wmtype) {
case HOMOGRAPHY:
diff --git a/av1/common/warped_motion.h b/av1/common/warped_motion.h
index a9c57f9..965b296 100644
--- a/av1/common/warped_motion.h
+++ b/av1/common/warped_motion.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_COMMON_WARPED_MOTION_H
-#define VP10_COMMON_WARPED_MOTION_H
+#ifndef AV1_COMMON_WARPED_MOTION_H
+#define AV1_COMMON_WARPED_MOTION_H
#include <stdio.h>
#include <stdlib.h>
@@ -17,9 +17,9 @@
#include <math.h>
#include <assert.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
// Bits of precision used for the model
#define WARPEDMODEL_PREC_BITS 8
@@ -72,25 +72,25 @@
int wmmat[8]; // For homography wmmat[9] is assumed to be 1
} WarpedMotionParams;
-double vp10_warp_erroradv(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_hbd, int bd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- uint8_t *ref, int width, int height, int stride,
- uint8_t *dst, int p_col, int p_row, int p_width,
- int p_height, int p_stride, int subsampling_x,
- int subsampling_y, int x_scale, int y_scale);
+double av1_warp_erroradv(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_hbd, int bd,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ uint8_t *ref, int width, int height, int stride,
+ uint8_t *dst, int p_col, int p_row, int p_width,
+ int p_height, int p_stride, int subsampling_x,
+ int subsampling_y, int x_scale, int y_scale);
-void vp10_warp_plane(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_hbd, int bd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- uint8_t *ref, int width, int height, int stride,
- uint8_t *pred, int p_col, int p_row, int p_width,
- int p_height, int p_stride, int subsampling_x,
- int subsampling_y, int x_scale, int y_scale);
+void av1_warp_plane(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_hbd, int bd,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ uint8_t *ref, int width, int height, int stride,
+ uint8_t *pred, int p_col, int p_row, int p_width,
+ int p_height, int p_stride, int subsampling_x,
+ int subsampling_y, int x_scale, int y_scale);
// Integerize model into the WarpedMotionParams structure
-void vp10_integerize_model(const double *model, TransformationType wmtype,
- WarpedMotionParams *wm);
-#endif // VP10_COMMON_WARPED_MOTION_H
+void av1_integerize_model(const double *model, TransformationType wmtype,
+ WarpedMotionParams *wm);
+#endif // AV1_COMMON_WARPED_MOTION_H
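
Note that `av1_warp_erroradv` and `av1_warp_plane` gain the `(use_hbd, bd)` parameters only when CONFIG_AOM_HIGHBITDEPTH is set, so the function signature itself changes with the build configuration and every call site must wrap the extra arguments in the same `#if`. A minimal self-contained sketch of that gating pattern (`clamp_plane` is an illustrative stand-in, not a library function):

#include <stdint.h>

#define CONFIG_AOM_HIGHBITDEPTH 1 /* build-time switch, as in aom_config.h */

static void clamp_plane(
#if CONFIG_AOM_HIGHBITDEPTH
    int use_hbd, int bd,
#endif /* CONFIG_AOM_HIGHBITDEPTH */
    uint8_t *buf, int len) {
#if CONFIG_AOM_HIGHBITDEPTH
  if (use_hbd) {
    uint16_t *b16 = (uint16_t *)buf; /* high-bitdepth planes hold uint16_t */
    const uint16_t maxval = (uint16_t)((1 << bd) - 1);
    for (int i = 0; i < len; ++i)
      if (b16[i] > maxval) b16[i] = maxval;
    return;
  }
#endif /* CONFIG_AOM_HIGHBITDEPTH */
  (void)buf;
  (void)len; /* 8-bit samples already span the full range */
}
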
diff --git a/av1/common/x86/vp10_convolve_filters_ssse3.c b/av1/common/x86/av1_convolve_filters_ssse3.c
similarity index 99%
rename from av1/common/x86/vp10_convolve_filters_ssse3.c
rename to av1/common/x86/av1_convolve_filters_ssse3.c
index b842589..7a40b9c 100644
--- a/av1/common/x86/vp10_convolve_filters_ssse3.c
+++ b/av1/common/x86/av1_convolve_filters_ssse3.c
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/filter.h"
#if CONFIG_EXT_INTERP
diff --git a/av1/common/x86/vp10_convolve_ssse3.c b/av1/common/x86/av1_convolve_ssse3.c
similarity index 95%
rename from av1/common/x86/vp10_convolve_ssse3.c
rename to av1/common/x86/av1_convolve_ssse3.c
index e891d74..0c6bb99 100644
--- a/av1/common/x86/vp10_convolve_ssse3.c
+++ b/av1/common/x86/av1_convolve_ssse3.c
@@ -11,7 +11,7 @@
#include <assert.h>
#include <tmmintrin.h>
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "av1/common/filter.h"
#define WIDTH_BOUND (16)
@@ -610,10 +610,10 @@
// (1) 10/12-taps filters
// (2) x_step_q4 = 16 then filter is fixed at the call
-void vp10_convolve_horiz_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams filter_params,
- const int subpel_x_q4, int x_step_q4, int avg) {
+void av1_convolve_horiz_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams filter_params,
+ const int subpel_x_q4, int x_step_q4, int avg) {
DECLARE_ALIGNED(16, uint16_t, temp[8 * 8]);
__m128i verf[6];
__m128i horf[2];
@@ -630,18 +630,18 @@
(void)x_step_q4;
if (0 == subpel_x_q4 || 16 != x_step_q4) {
- vp10_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
- subpel_x_q4, x_step_q4, avg);
+ av1_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+ subpel_x_q4, x_step_q4, avg);
return;
}
- hCoeffs = vp10_get_subpel_filter_signal_dir(filter_params, subpel_x_q4 - 1);
+ hCoeffs = av1_get_subpel_filter_signal_dir(filter_params, subpel_x_q4 - 1);
vCoeffs =
- vp10_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
+ av1_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
if (!hCoeffs || !vCoeffs) {
- vp10_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
- subpel_x_q4, x_step_q4, avg);
+ av1_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+ subpel_x_q4, x_step_q4, avg);
return;
}
@@ -825,10 +825,10 @@
} while (rowIndex < h);
}
-void vp10_convolve_vert_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams filter_params,
- const int subpel_y_q4, int y_step_q4, int avg) {
+void av1_convolve_vert_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int w, int h,
+ const InterpFilterParams filter_params,
+ const int subpel_y_q4, int y_step_q4, int avg) {
__m128i verf[6];
SubpelFilterCoeffs vCoeffs;
const uint8_t *src_ptr;
@@ -839,17 +839,17 @@
const int tapsNum = filter_params.taps;
if (0 == subpel_y_q4 || 16 != y_step_q4) {
- vp10_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
- subpel_y_q4, y_step_q4, avg);
+ av1_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+ subpel_y_q4, y_step_q4, avg);
return;
}
vCoeffs =
- vp10_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
+ av1_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
if (!vCoeffs) {
- vp10_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
- subpel_y_q4, y_step_q4, avg);
+ av1_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+ subpel_y_q4, y_step_q4, avg);
return;
}
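
Both SSSE3 convolve kernels above guard their entry: if the sub-pel phase is zero, the step is not exactly 16 (one full pel per output), or no prefiltered coefficient table exists for the filter, they forward to the `_c` reference implementation rather than handle the rare case in SIMD. A reduced sketch of that guard-and-fallback shape (all names are stand-ins):

#include <stdint.h>

void filter_row_c(const uint8_t *src, uint8_t *dst, int w);    /* reference */
void filter_row_simd(const uint8_t *src, uint8_t *dst, int w); /* fast path */

void filter_row(const uint8_t *src, uint8_t *dst, int w,
                int subpel_q4, int step_q4) {
  /* The SIMD kernel is only written for the common fixed-step case. */
  if (subpel_q4 == 0 || step_q4 != 16) {
    filter_row_c(src, dst, w); /* rare configurations take the C path */
    return;
  }
  filter_row_simd(src, dst, w);
}
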
diff --git a/av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
similarity index 99%
rename from av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h
rename to av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
index e7d63fe..ecd3d4b 100644
--- a/av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
@@ -10,8 +10,8 @@
#include <emmintrin.h> // SSE2
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_fwd_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_fwd_txfm.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -22,31 +22,31 @@
#define ADD_EPI16 _mm_adds_epi16
#define SUB_EPI16 _mm_subs_epi16
#if FDCT32x32_HIGH_PRECISION
-void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
int i, j;
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
out[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
}
}
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rows_c
#else
-void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
int i, j;
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
- vp10_fdct32(temp_in, temp_out, 1);
+ av1_fdct32(temp_in, temp_out, 1);
for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
}
}
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rd_rows_c
#endif // FDCT32x32_HIGH_PRECISION
#else
#define ADD_EPI16 _mm_add_epi16
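
The row rounding in `av1_fdct32x32_rows_c` above, `(x + 1 + (x < 0)) >> 2`, adds one extra unit for negative inputs so the shift rounds symmetrically about zero instead of toward negative infinity, keeping the transform sign-symmetric. A small check of that property (assumes arithmetic right shift of negative values, which the codec's targets provide):

#include <assert.h>

static int round_shift2(int x) { return (x + 1 + (x < 0)) >> 2; }

int main(void) {
  for (int x = 0; x < 4096; ++x)
    assert(round_shift2(-x) == -round_shift2(x)); /* f(-x) == -f(x) */
  assert((-5 >> 2) == -2);        /* a plain shift rounds toward -inf... */
  assert(round_shift2(-5) == -1); /* ...the corrected form does not */
  return 0;
}
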
diff --git a/av1/common/x86/vp10_fwd_txfm1d_sse4.c b/av1/common/x86/av1_fwd_txfm1d_sse4.c
similarity index 98%
rename from av1/common/x86/vp10_fwd_txfm1d_sse4.c
rename to av1/common/x86/av1_fwd_txfm1d_sse4.c
index 902c9b2..f0bcef9 100644
--- a/av1/common/x86/vp10_fwd_txfm1d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm1d_sse4.c
@@ -1,7 +1,7 @@
-#include "av1/common/x86/vp10_txfm1d_sse4.h"
+#include "av1/common/x86/av1_txfm1d_sse4.h"
-void vp10_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 4;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -53,8 +53,8 @@
}
}
-void vp10_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 8;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -152,8 +152,8 @@
}
}
-void vp10_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 16;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -349,8 +349,8 @@
}
}
-void vp10_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 32;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -764,8 +764,8 @@
}
}
-void vp10_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 4;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -835,8 +835,8 @@
}
}
-void vp10_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 8;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -960,8 +960,8 @@
}
}
-void vp10_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 16;
const int num_per_128 = 4;
const int32_t *cospi;
@@ -1199,8 +1199,8 @@
}
}
-void vp10_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
const int txfm_size = 32;
const int num_per_128 = 4;
const int32_t *cospi;
diff --git a/av1/common/x86/vp10_fwd_txfm2d_sse4.c b/av1/common/x86/av1_fwd_txfm2d_sse4.c
similarity index 72%
rename from av1/common/x86/vp10_fwd_txfm2d_sse4.c
rename to av1/common/x86/av1_fwd_txfm2d_sse4.c
index a59a0c8..07c283e 100644
--- a/av1/common/x86/vp10_fwd_txfm2d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm2d_sse4.c
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "av1/common/x86/vp10_txfm1d_sse4.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/x86/av1_txfm1d_sse4.h"
static INLINE void int16_array_with_stride_to_int32_array_without_stride(
const int16_t *input, int stride, int32_t *output, int txfm1d_size) {
@@ -28,14 +28,14 @@
static INLINE TxfmFuncSSE2 fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
switch (txfm_type) {
- case TXFM_TYPE_DCT4: return vp10_fdct4_new_sse4_1; break;
- case TXFM_TYPE_DCT8: return vp10_fdct8_new_sse4_1; break;
- case TXFM_TYPE_DCT16: return vp10_fdct16_new_sse4_1; break;
- case TXFM_TYPE_DCT32: return vp10_fdct32_new_sse4_1; break;
- case TXFM_TYPE_ADST4: return vp10_fadst4_new_sse4_1; break;
- case TXFM_TYPE_ADST8: return vp10_fadst8_new_sse4_1; break;
- case TXFM_TYPE_ADST16: return vp10_fadst16_new_sse4_1; break;
- case TXFM_TYPE_ADST32: return vp10_fadst32_new_sse4_1; break;
+ case TXFM_TYPE_DCT4: return av1_fdct4_new_sse4_1; break;
+ case TXFM_TYPE_DCT8: return av1_fdct8_new_sse4_1; break;
+ case TXFM_TYPE_DCT16: return av1_fdct16_new_sse4_1; break;
+ case TXFM_TYPE_DCT32: return av1_fdct32_new_sse4_1; break;
+ case TXFM_TYPE_ADST4: return av1_fadst4_new_sse4_1; break;
+ case TXFM_TYPE_ADST8: return av1_fadst8_new_sse4_1; break;
+ case TXFM_TYPE_ADST16: return av1_fadst16_new_sse4_1; break;
+ case TXFM_TYPE_ADST32: return av1_fadst32_new_sse4_1; break;
default: assert(0);
}
return NULL;
@@ -69,18 +69,18 @@
transpose_32(txfm_size, buf_128, out_128);
}
-void vp10_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
- int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
+ int stride, int tx_type, int bd) {
int32_t txfm_buf[1024];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
(void)bd;
fwd_txfm2d_sse4_1(input, output, stride, cfg.cfg, txfm_buf);
}
-void vp10_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
- int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
+ int stride, int tx_type, int bd) {
int32_t txfm_buf[4096];
- TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
(void)bd;
fwd_txfm2d_sse4_1(input, output, stride, cfg.cfg, txfm_buf);
}
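
`fwd_txfm_type_to_func` above resolves the TXFM_TYPE enum to a 1-D SSE4.1 kernel once, so `fwd_txfm2d_sse4_1` can drive its column and row passes through a plain function pointer. A reduced sketch of that shape (the types and kernel names here are stand-ins):

#include <stdint.h>

typedef void (*Txfm1dFunc)(const int32_t *in, int32_t *out, int n);

void fdct4_1d(const int32_t *in, int32_t *out, int n);  /* stand-ins */
void fadst4_1d(const int32_t *in, int32_t *out, int n);

typedef enum { TYPE_DCT4, TYPE_ADST4 } TxfmType;

static Txfm1dFunc txfm_type_to_func(TxfmType t) {
  switch (t) {
    case TYPE_DCT4: return fdct4_1d;
    case TYPE_ADST4: return fadst4_1d;
    default: return 0; /* invalid type; the real code asserts here */
  }
}
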
diff --git a/av1/common/x86/vp10_fwd_txfm_impl_sse2.h b/av1/common/x86/av1_fwd_txfm_impl_sse2.h
similarity index 96%
rename from av1/common/x86/vp10_fwd_txfm_impl_sse2.h
rename to av1/common/x86/av1_fwd_txfm_impl_sse2.h
index 9bb8abc..ecaa97c 100644
--- a/av1/common/x86/vp10_fwd_txfm_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_txfm_impl_sse2.h
@@ -10,7 +10,7 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -98,7 +98,7 @@
_mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00)));
test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
if (test) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -169,7 +169,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&x0, &x1);
if (overflow) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -191,7 +191,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&t0, &t1);
if (overflow) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -230,7 +230,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&x0, &x1);
if (overflow) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -313,7 +313,7 @@
overflow =
check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
}
@@ -328,7 +328,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -371,7 +371,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -401,7 +401,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&r0, &r1);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -414,7 +414,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -457,7 +457,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -720,7 +720,7 @@
overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
&input4, &input5, &input6, &input7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -740,7 +740,7 @@
check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
&step1_4, &step1_5, &step1_6, &step1_7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -760,7 +760,7 @@
overflow =
check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -774,7 +774,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -796,7 +796,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -817,7 +817,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&r0, &r1);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -830,7 +830,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -853,7 +853,7 @@
overflow =
check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -881,7 +881,7 @@
overflow =
check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -901,7 +901,7 @@
check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
&step3_4, &step3_5, &step3_6, &step3_7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -924,7 +924,7 @@
overflow =
check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -944,7 +944,7 @@
check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
&step1_4, &step1_5, &step1_6, &step1_7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -966,7 +966,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -987,7 +987,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
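
In DCT_HIGH_BIT_DEPTH builds the kernel above re-checks its 16-bit intermediates after each butterfly stage and bails out to `aom_highbd_fdct*_c` on overflow. The test works because the arithmetic is built on saturating adds (`ADD_EPI16` maps to `_mm_adds_epi16`), so an overflowed lane pins at INT16_MAX or INT16_MIN. A stand-in for one such lane test (the real helpers are the `check_epi16_overflow_*` family in aom_dsp):

#include <emmintrin.h> /* SSE2 */

/* Nonzero if any 16-bit lane of v sits at the saturation bounds that
   _mm_adds_epi16/_mm_subs_epi16 produce on overflow. */
static int any_lane_saturated(__m128i v) {
  const __m128i maxv = _mm_set1_epi16(0x7fff);
  const __m128i minv = _mm_set1_epi16((short)0x8000);
  const __m128i hit = _mm_or_si128(_mm_cmpeq_epi16(v, maxv),
                                   _mm_cmpeq_epi16(v, minv));
  return _mm_movemask_epi8(hit);
}
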
diff --git a/av1/common/x86/vp10_fwd_txfm_sse2.c b/av1/common/x86/av1_fwd_txfm_sse2.c
similarity index 84%
rename from av1/common/x86/vp10_fwd_txfm_sse2.c
rename to av1/common/x86/av1_fwd_txfm_sse2.c
index 05ec539..3a95071 100644
--- a/av1/common/x86/vp10_fwd_txfm_sse2.c
+++ b/av1/common/x86/av1_fwd_txfm_sse2.c
@@ -10,12 +10,12 @@
#include <emmintrin.h> // SSE2
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"
-void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
__m128i in0, in1;
__m128i tmp;
const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@
store_output(&in0, output);
}
-void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
__m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
__m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
__m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,8 +84,8 @@
store_output(&in1, output);
}
-void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
- int stride) {
+void av1_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+ int stride) {
__m128i in0, in1, in2, in3;
__m128i u0, u1;
__m128i sum = _mm_setzero_si128();
@@ -153,8 +153,8 @@
store_output(&in1, output);
}
-void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
- int stride) {
+void av1_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+ int stride) {
__m128i in0, in1, in2, in3;
__m128i u0, u1;
__m128i sum = _mm_setzero_si128();
@@ -226,47 +226,47 @@
}
#define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vp10_fdct4x4_sse2
-#define FDCT8x8_2D vp10_fdct8x8_sse2
-#define FDCT16x16_2D vp10_fdct16x16_sse2
-#include "av1/common/x86/vp10_fwd_txfm_impl_sse2.h"
+#define FDCT4x4_2D av1_fdct4x4_sse2
+#define FDCT8x8_2D av1_fdct8x8_sse2
+#define FDCT16x16_2D av1_fdct16x16_sse2
+#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D
-#define FDCT32x32_2D vp10_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D vp10_fdct32x32_sse2
+#define FDCT32x32_2D av1_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vp10_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vp10_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vp10_highbd_fdct16x16_sse2
-#include "av1/common/x86/vp10_fwd_txfm_impl_sse2.h" // NOLINT
+#define FDCT4x4_2D av1_highbd_fdct4x4_sse2
+#define FDCT8x8_2D av1_highbd_fdct8x8_sse2
+#define FDCT16x16_2D av1_highbd_fdct16x16_sse2
+#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h" // NOLINT
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D
-#define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D vp10_highbd_fdct32x32_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
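
`av1_fwd_txfm_sse2.c` above stamps out the 4x4/8x8/16x16 kernels and four 32x32 variants by redefining `FDCT*_2D` (plus the precision and bit-depth knobs) and re-including the same implementation header each time. A minimal stand-in for that macro-templating pattern, condensed into one translation unit (in the real code the templated body lives in a separate `*_impl_sse2.h` that is `#include`d repeatedly):

/* The "template": the function name and a behavior knob come from macros. */
#define DEFINE_ROW_SUM(NAME, ROUND_SHIFT)                        \
  static int NAME(const int *row, int n) {                       \
    int acc = 0;                                                 \
    for (int i = 0; i < n; ++i) acc += row[i];                   \
    return (acc + (1 << ((ROUND_SHIFT)-1))) >> (ROUND_SHIFT);    \
  }

DEFINE_ROW_SUM(row_sum_hp, 1) /* high-precision variant */
DEFINE_ROW_SUM(row_sum_rd, 3) /* coarser variant */
#undef DEFINE_ROW_SUM
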
diff --git a/av1/common/x86/vp10_highbd_convolve_filters_sse4.c b/av1/common/x86/av1_highbd_convolve_filters_sse4.c
similarity index 99%
rename from av1/common/x86/vp10_highbd_convolve_filters_sse4.c
rename to av1/common/x86/av1_highbd_convolve_filters_sse4.c
index 7f3630c..e2337fd 100644
--- a/av1/common/x86/vp10_highbd_convolve_filters_sse4.c
+++ b/av1/common/x86/av1_highbd_convolve_filters_sse4.c
@@ -7,10 +7,10 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/filter.h"
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EXT_INTERP
DECLARE_ALIGNED(16, const int16_t,
sub_pel_filters_10sharp_highbd_ver_signal_dir[15][6][8]) = {
@@ -137,7 +137,7 @@
};
#endif
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EXT_INTERP
DECLARE_ALIGNED(16, const int16_t,
sub_pel_filters_12sharp_highbd_ver_signal_dir[15][6][8]) = {
@@ -264,7 +264,7 @@
};
#endif
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#if USE_TEMPORALFILTER_12TAP
DECLARE_ALIGNED(
16, const int16_t,
diff --git a/av1/common/x86/vp10_highbd_convolve_sse4.c b/av1/common/x86/av1_highbd_convolve_sse4.c
similarity index 91%
rename from av1/common/x86/vp10_highbd_convolve_sse4.c
rename to av1/common/x86/av1_highbd_convolve_sse4.c
index ea78400..705c963 100644
--- a/av1/common/x86/vp10_highbd_convolve_sse4.c
+++ b/av1/common/x86/av1_highbd_convolve_sse4.c
@@ -11,7 +11,7 @@
#include <assert.h>
#include <smmintrin.h>
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "av1/common/filter.h"
typedef void (*TransposeSave)(const int width, int pixelsNum, uint32_t *src,
@@ -212,12 +212,12 @@
_mm_storeu_si128((__m128i *)buf, u[0]);
}
-void vp10_highbd_convolve_horiz_sse4_1(const uint16_t *src, int src_stride,
- uint16_t *dst, int dst_stride, int w,
- int h,
- const InterpFilterParams filter_params,
- const int subpel_x_q4, int x_step_q4,
- int avg, int bd) {
+void av1_highbd_convolve_horiz_sse4_1(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w,
+ int h,
+ const InterpFilterParams filter_params,
+ const int subpel_x_q4, int x_step_q4,
+ int avg, int bd) {
DECLARE_ALIGNED(16, uint32_t, temp[4 * 4]);
__m128i verf[6];
HbdSubpelFilterCoeffs vCoeffs;
@@ -228,18 +228,16 @@
(void)x_step_q4;
if (0 == subpel_x_q4 || 16 != x_step_q4) {
- vp10_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
- filter_params, subpel_x_q4, x_step_q4, avg,
- bd);
+ av1_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params, subpel_x_q4, x_step_q4, avg, bd);
return;
}
vCoeffs =
- vp10_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
+ av1_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
if (!vCoeffs) {
- vp10_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
- filter_params, subpel_x_q4, x_step_q4, avg,
- bd);
+ av1_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params, subpel_x_q4, x_step_q4, avg, bd);
return;
}
@@ -423,27 +421,27 @@
} while (rowIndex < h);
}
-void vp10_highbd_convolve_vert_sse4_1(const uint16_t *src, int src_stride,
- uint16_t *dst, int dst_stride, int w,
- int h,
- const InterpFilterParams filter_params,
- const int subpel_y_q4, int y_step_q4,
- int avg, int bd) {
+void av1_highbd_convolve_vert_sse4_1(const uint16_t *src, int src_stride,
+ uint16_t *dst, int dst_stride, int w,
+ int h,
+ const InterpFilterParams filter_params,
+ const int subpel_y_q4, int y_step_q4,
+ int avg, int bd) {
__m128i verf[6];
HbdSubpelFilterCoeffs vCoeffs;
const int tapsNum = filter_params.taps;
if (0 == subpel_y_q4 || 16 != y_step_q4) {
- vp10_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
- filter_params, subpel_y_q4, y_step_q4, avg, bd);
+ av1_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params, subpel_y_q4, y_step_q4, avg, bd);
return;
}
vCoeffs =
- vp10_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
+ av1_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
if (!vCoeffs) {
- vp10_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
- filter_params, subpel_y_q4, y_step_q4, avg, bd);
+ av1_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params, subpel_y_q4, y_step_q4, avg, bd);
return;
}
diff --git a/av1/common/x86/vp10_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
similarity index 98%
rename from av1/common/x86/vp10_inv_txfm_sse2.c
rename to av1/common/x86/av1_inv_txfm_sse2.c
index b09933e..74a0d90 100644
--- a/av1/common/x86/vp10_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
-#include "av1/common/x86/vp10_inv_txfm_sse2.h"
+#include "./av1_rtcd.h"
+#include "av1/common/x86/av1_inv_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
#define RECON_AND_STORE4X4(dest, in_x) \
@@ -21,7 +21,7 @@
*(int *)(dest) = _mm_cvtsi128_si32(d0); \
}
-void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
const __m128i cst = _mm_setr_epi16(
@@ -151,7 +151,7 @@
}
}
-void vp10_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -176,7 +176,7 @@
res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
}
-void vp10_idct4_sse2(__m128i *in) {
+void av1_idct4_sse2(__m128i *in) {
const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -212,7 +212,7 @@
in[1] = _mm_shuffle_epi32(in[1], 0x4E);
}
-void vp10_iadst4_sse2(__m128i *in) {
+void av1_iadst4_sse2(__m128i *in) {
const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@@ -446,7 +446,7 @@
out7 = _mm_subs_epi16(stp1_0, stp2_7); \
}
-void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -477,11 +477,11 @@
// 2-D
for (i = 0; i < 2; i++) {
- // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+ // 8x8 Transpose is copied from av1_fdct8x8_sse2()
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
- // 4-stage 1D vp10_idct8x8
+ // 4-stage 1D av1_idct8x8
IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
in6, in7);
}
@@ -515,7 +515,7 @@
RECON_AND_STORE(dest + 7 * stride, in7);
}
-void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -536,7 +536,7 @@
RECON_AND_STORE(dest + 7 * stride, dc_value);
}
-void vp10_idct8_sse2(__m128i *in) {
+void av1_idct8_sse2(__m128i *in) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
@@ -552,16 +552,16 @@
__m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+ // 8x8 Transpose is copied from av1_fdct8x8_sse2()
TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
in1, in2, in3, in4, in5, in6, in7);
- // 4-stage 1D vp10_idct8x8
+ // 4-stage 1D av1_idct8x8
IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
in[4], in[5], in[6], in[7]);
}
-void vp10_iadst8_sse2(__m128i *in) {
+void av1_iadst8_sse2(__m128i *in) {
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@@ -789,7 +789,7 @@
in[7] = _mm_sub_epi16(k__const_0, s1);
}
-void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1158,8 +1158,8 @@
stp2_12) \
}
-void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
- int stride) {
+void av1_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+ int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
const __m128i zero = _mm_setzero_si128();
@@ -1200,7 +1200,7 @@
curr1 = l;
for (i = 0; i < 2; i++) {
- // 1-D vp10_idct
+ // 1-D av1_idct
// Load input data.
in[0] = _mm_load_si128((const __m128i *)input);
@@ -1248,7 +1248,7 @@
}
for (i = 0; i < 2; i++) {
int j;
- // 1-D vp10_idct
+ // 1-D av1_idct
array_transpose_8x8(l + i * 8, in);
array_transpose_8x8(r + i * 8, in + 8);
@@ -1283,8 +1283,7 @@
}
}
-void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
- int stride) {
+void av1_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a, i;
@@ -1316,7 +1315,7 @@
}
}
-static void vp10_iadst16_8col(__m128i *in) {
+static void av1_iadst16_8col(__m128i *in) {
// perform 16x16 1-D ADST for 8 columns
__m128i s[16], x[16], u[32], v[32];
const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
@@ -1786,7 +1785,7 @@
in[15] = _mm_sub_epi16(kZero, s[1]);
}
-static void vp10_idct16_8col(__m128i *in) {
+static void av1_idct16_8col(__m128i *in) {
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
@@ -2130,20 +2129,20 @@
in[15] = _mm_sub_epi16(s[0], s[15]);
}
-void vp10_idct16_sse2(__m128i *in0, __m128i *in1) {
+void av1_idct16_sse2(__m128i *in0, __m128i *in1) {
array_transpose_16x16(in0, in1);
- vp10_idct16_8col(in0);
- vp10_idct16_8col(in1);
+ av1_idct16_8col(in0);
+ av1_idct16_8col(in1);
}
-void vp10_iadst16_sse2(__m128i *in0, __m128i *in1) {
+void av1_iadst16_sse2(__m128i *in0, __m128i *in1) {
array_transpose_16x16(in0, in1);
- vp10_iadst16_8col(in0);
- vp10_iadst16_8col(in1);
+ av1_iadst16_8col(in0);
+ av1_iadst16_8col(in1);
}
-void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
- int stride) {
+void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+ int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
const __m128i zero = _mm_setzero_si128();
@@ -3016,12 +3015,12 @@
}
// Only upper-left 8x8 has non-zero coeff
-void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
- int stride) {
+void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+ int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
- // vp10_idct constants for each stage
+ // av1_idct constants for each stage
const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
@@ -3173,13 +3172,13 @@
}
}
-void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
- int stride) {
+void av1_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+ int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
const __m128i zero = _mm_setzero_si128();
- // vp10_idct constants for each stage
+ // av1_idct constants for each stage
const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
@@ -3241,7 +3240,7 @@
for (i = 0; i < 4; i++) {
i32 = (i << 5);
- // First 1-D vp10_idct
+ // First 1-D av1_idct
// Load input data.
LOAD_DQCOEFF(in[0], input);
LOAD_DQCOEFF(in[8], input);
@@ -3391,7 +3390,7 @@
col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
}
for (i = 0; i < 4; i++) {
- // Second 1-D vp10_idct
+ // Second 1-D av1_idct
j = i << 3;
// Transpose 32x8 block to 8x32 block
@@ -3447,8 +3446,7 @@
}
}
-void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
- int stride) {
+void av1_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a, i;
@@ -3468,7 +3466,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
__m128i ubounded, retval;
const __m128i zero = _mm_set1_epi16(0);
@@ -3482,8 +3480,8 @@
return retval;
}
-void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
int i, j;
@@ -3516,7 +3514,7 @@
if (!test) {
// Do the row transform
- vp10_idct4_sse2(inptr);
+ av1_idct4_sse2(inptr);
// Check the min & max values
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3545,14 +3543,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct4_c(input, outptr, bd);
+ av1_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
}
if (optimised_cols) {
- vp10_idct4_sse2(inptr);
+ av1_idct4_sse2(inptr);
// Final round and shift
inptr[0] = _mm_add_epi16(inptr[0], eight);
@@ -3588,7 +3586,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vp10_highbd_idct4_c(temp_in, temp_out, bd);
+ av1_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3597,8 +3595,8 @@
}
}
-void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
int i, j, test;
@@ -3632,7 +3630,7 @@
if (!test) {
// Do the row transform
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Find the min & max for the column transform
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3662,14 +3660,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 8; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
}
if (optimised_cols) {
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Final round & shift and Reconstruction and Store
{
@@ -3688,7 +3686,7 @@
tran_low_t temp_in[8], temp_out[8];
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3697,8 +3695,8 @@
}
}
-void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
int i, j, test;
@@ -3733,7 +3731,7 @@
if (!test) {
// Do the row transform
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Find the min & max for the column transform
// N.B. Only first 4 cols contain non-zero coeffs
@@ -3765,14 +3763,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
}
if (optimised_cols) {
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Final round & shift and Reconstruction and Store
{
@@ -3791,7 +3789,7 @@
tran_low_t temp_in[8], temp_out[8];
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3800,8 +3798,8 @@
}
}
-void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
int i, j, test;
@@ -3838,7 +3836,7 @@
if (!test) {
// Do the row transform
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Find the min & max for the column transform
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3873,14 +3871,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 16; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
}
if (optimised_cols) {
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Final round & shift and Reconstruction and Store
{
@@ -3904,7 +3902,7 @@
tran_low_t temp_in[16], temp_out[16];
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3913,8 +3911,8 @@
}
}
-void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
+void av1_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
int i, j, test;
@@ -3953,7 +3951,7 @@
if (!test) {
// Do the row transform (N.B. This transposes inptr)
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Find the min & max for the column transform
// N.B. Only first 4 cols contain non-zero coeffs
@@ -3991,14 +3989,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
}
if (optimised_cols) {
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Final round & shift and Reconstruction and Store
{
@@ -4022,7 +4020,7 @@
tran_low_t temp_in[16], temp_out[16];
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4030,4 +4028,4 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
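Each high-bitdepth IDCT above ends the same way: the column output is rounded by a size-dependent shift (4 for 4x4, 5 for 8x8, 6 for 16x16) and clipped into the bd-bit pixel range. A minimal standalone sketch of that step, with the aom_dsp helpers re-implemented locally (the names below are illustrative, not the tree's own):

    #include <stdint.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* Clamp into [0, 2^bd - 1], as highbd_clip_pixel_add does internally. */
    static uint16_t clip_pixel_highbd_sketch(int val, int bd) {
      const int max = (1 << bd) - 1;
      return (uint16_t)(val < 0 ? 0 : (val > max ? max : val));
    }

    /* One pixel of the columns loop: dest += rounded residual, then clip. */
    static uint16_t add_residual_sketch(uint16_t dest, int32_t residual,
                                        int shift, int bd) {
      return clip_pixel_highbd_sketch(
          (int)dest + ROUND_POWER_OF_TWO(residual, shift), bd);
    }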
diff --git a/av1/common/x86/vp10_inv_txfm_sse2.h b/av1/common/x86/av1_inv_txfm_sse2.h
similarity index 97%
rename from av1/common/x86/vp10_inv_txfm_sse2.h
rename to av1/common/x86/av1_inv_txfm_sse2.h
index 0839ab9..a4cbbcf 100644
--- a/av1/common/x86/vp10_inv_txfm_sse2.h
+++ b/av1/common/x86/av1_inv_txfm_sse2.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
-#define VPX_DSP_X86_INV_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_INV_TXFM_SSE2_H_
+#define AOM_DSP_X86_INV_TXFM_SSE2_H_
#include <emmintrin.h> // SSE2
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
+#include "av1/common/av1_inv_txfm.h"
// perform 8x8 transpose
static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
@@ -181,4 +181,4 @@
void iadst8_sse2(__m128i *in);
void iadst16_sse2(__m128i *in0, __m128i *in1);
-#endif // VPX_DSP_X86_INV_TXFM_SSE2_H_
+#endif // AOM_DSP_X86_INV_TXFM_SSE2_H_
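The transpose helpers declared in this header (array_transpose_8x8 and friends) are all built from the same SSE2 unpack ladder. A hedged sketch of the idiom on a 4x4 tile of 32-bit lanes, where two unpack stages suffice; the 8x8 16-bit version in the header simply adds one more stage:

    #include <emmintrin.h>

    static void transpose_4x4_epi32(const __m128i in[4], __m128i out[4]) {
      const __m128i t0 = _mm_unpacklo_epi32(in[0], in[1]); /* a0 b0 a1 b1 */
      const __m128i t1 = _mm_unpacklo_epi32(in[2], in[3]); /* c0 d0 c1 d1 */
      const __m128i t2 = _mm_unpackhi_epi32(in[0], in[1]); /* a2 b2 a3 b3 */
      const __m128i t3 = _mm_unpackhi_epi32(in[2], in[3]); /* c2 d2 c3 d3 */
      out[0] = _mm_unpacklo_epi64(t0, t1); /* a0 b0 c0 d0 */
      out[1] = _mm_unpackhi_epi64(t0, t1); /* a1 b1 c1 d1 */
      out[2] = _mm_unpacklo_epi64(t2, t3); /* a2 b2 c2 d2 */
      out[3] = _mm_unpackhi_epi64(t2, t3); /* a3 b3 c3 d3 */
    }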
diff --git a/av1/common/x86/vp10_txfm1d_sse4.h b/av1/common/x86/av1_txfm1d_sse4.h
similarity index 68%
rename from av1/common/x86/vp10_txfm1d_sse4.h
rename to av1/common/x86/av1_txfm1d_sse4.h
index f05a54c..af7afb7 100644
--- a/av1/common/x86/vp10_txfm1d_sse4.h
+++ b/av1/common/x86/av1_txfm1d_sse4.h
@@ -1,52 +1,52 @@
-#ifndef VP10_TXMF1D_SSE2_H_
-#define VP10_TXMF1D_SSE2_H_
+#ifndef AV1_TXMF1D_SSE2_H_
+#define AV1_TXMF1D_SSE2_H_
#include <smmintrin.h>
-#include "av1/common/vp10_txfm.h"
+#include "av1/common/av1_txfm.h"
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct64_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct64_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_idct4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_idct4_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct8_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct16_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_idct32_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct16_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct32_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct64_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct64_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_iadst4_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst8_new_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst16_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_iadst32_new_sse4_1(const __m128i *input, __m128i *output,
const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst16_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst32_new_sse4_1(const __m128i *input, __m128i *output,
- const int8_t *cos_bit, const int8_t *stage_range);
static INLINE void transpose_32_4x4(int stride, const __m128i *input,
__m128i *output) {
@@ -141,4 +141,4 @@
}
#endif
-#endif // VP10_TXMF1D_SSE2_H_
+#endif // AV1_TXMF1D_SSE2_H_
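Each 1D kernel declared above takes per-stage cos_bit and stage_range arrays: cos_bit gives the fixed-point precision of that stage's cosine constants, and stage_range the coefficient range the stage must survive. A sketch of the rounded multiply-accumulate primitive such stages are built from, assuming half_btf-style semantics (the names below are illustrative, not the tree's own):

    #include <smmintrin.h>

    /* Round-to-nearest right shift by `bit` on four 32-bit lanes. */
    static __m128i round_shift_32x4(__m128i x, int bit) {
      const __m128i rounding = _mm_set1_epi32(1 << (bit - 1));
      return _mm_srai_epi32(_mm_add_epi32(x, rounding), bit);
    }

    /* (w0*n0 + w1*n1 + round) >> bit: the butterfly step, with the weights
     * pre-scaled by (1 << cos_bit). _mm_mullo_epi32 is why this is SSE4.1. */
    static __m128i half_btf_sketch(__m128i w0, __m128i n0, __m128i w1,
                                   __m128i n1, int bit) {
      const __m128i x = _mm_mullo_epi32(w0, n0);
      const __m128i y = _mm_mullo_epi32(w1, n1);
      return round_shift_32x4(_mm_add_epi32(x, y), bit);
    }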
diff --git a/av1/common/x86/highbd_inv_txfm_sse4.c b/av1/common/x86/highbd_inv_txfm_sse4.c
index f3686eb..eada3af 100644
--- a/av1/common/x86/highbd_inv_txfm_sse4.c
+++ b/av1/common/x86/highbd_inv_txfm_sse4.c
@@ -11,9 +11,9 @@
#include <assert.h>
#include <smmintrin.h> /* SSE4.1 */
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
#include "av1/common/x86/highbd_txfm_utility_sse4.h"
static INLINE void load_buffer_4x4(const int32_t *coeff, __m128i *in) {
@@ -229,8 +229,8 @@
_mm_storel_epi64((__m128i *)(output + 3 * stride), v3);
}
-void vp10_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
+ int stride, int tx_type, int bd) {
__m128i in[4];
const TXFM_2D_CFG *cfg = NULL;
@@ -695,8 +695,8 @@
_mm_store_si128((__m128i *)(output + 7 * stride), u7);
}
-void vp10_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
+ int stride, int tx_type, int bd) {
__m128i in[16], out[16];
const TXFM_2D_CFG *cfg = NULL;
@@ -1295,8 +1295,8 @@
round_shift_8x8(&in[48], shift);
}
-void vp10_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
+ int stride, int tx_type, int bd) {
__m128i in[64], out[64];
const TXFM_2D_CFG *cfg = NULL;
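One structural note on this file: under CONFIG_AOM_HIGHBITDEPTH the coefficients are 32-bit, so the load_buffer_4x4() helper near the top is exactly four vector loads. A hedged equivalent (the in-tree helper may well use aligned loads):

    #include <smmintrin.h>
    #include <stdint.h>

    static void load_buffer_4x4_sketch(const int32_t *coeff, __m128i in[4]) {
      in[0] = _mm_loadu_si128((const __m128i *)(coeff + 0));  /* row 0 */
      in[1] = _mm_loadu_si128((const __m128i *)(coeff + 4));  /* row 1 */
      in[2] = _mm_loadu_si128((const __m128i *)(coeff + 8));  /* row 2 */
      in[3] = _mm_loadu_si128((const __m128i *)(coeff + 12)); /* row 3 */
    }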
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 70bf9bf..e9f0ce8 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "aom_dsp/x86/inv_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
#include "aom_ports/mem.h"
@@ -56,8 +56,8 @@
} while (0)
#endif
-void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
__m128i in[2];
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
@@ -147,8 +147,8 @@
}
}
-void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
- int tx_type) {
+void av1_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+ int tx_type) {
__m128i in[8];
const __m128i zero = _mm_setzero_si128();
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -240,8 +240,8 @@
RECON_AND_STORE(dest + 7 * stride, in[7]);
}
-void vp10_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
- int stride, int tx_type) {
+void av1_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+ int stride, int tx_type) {
__m128i in[32];
__m128i *in0 = &in[0];
__m128i *in1 = &in[16];
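The three av1_iht* functions in this file dispatch on tx_type, running one 1D kernel per pass and substituting ADST for DCT in the dimension the type names. A hedged sketch of that dispatch for the 4x4 case, assuming the vp9-era pass order (rows first, on pre-transposed data) and the first four TX_TYPE values from av1/common/enums.h; idct4_sse2/iadst4_sse2 come from aom_dsp/x86/inv_txfm_sse2.h, which this file includes:

    #include <emmintrin.h>

    typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TxTypeSketch;

    extern void idct4_sse2(__m128i *in);
    extern void iadst4_sse2(__m128i *in);

    static void iht4_passes_sketch(__m128i in[2], TxTypeSketch tx_type) {
      void (*pass1)(__m128i *) = (tx_type == DCT_ADST || tx_type == ADST_ADST)
                                     ? iadst4_sse2 : idct4_sse2;
      void (*pass2)(__m128i *) = (tx_type == ADST_DCT || tx_type == ADST_ADST)
                                     ? iadst4_sse2 : idct4_sse2;
      pass1(in); /* rows */
      pass2(in); /* columns */
    }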
diff --git a/av1/common/x86/reconintra_sse4.c b/av1/common/x86/reconintra_sse4.c
index cac34a6..ab1fa93 100644
--- a/av1/common/x86/reconintra_sse4.c
+++ b/av1/common/x86/reconintra_sse4.c
@@ -9,7 +9,7 @@
*/
#include <smmintrin.h>
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "aom_ports/mem.h"
#include "av1/common/enums.h"
#include "av1/common/intra_filters.h"
@@ -498,86 +498,84 @@
GeneratePrediction(above, left, bs, prm, meanValue, dst, stride);
}
-void vp10_dc_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_dc_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, DC_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_v_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_v_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, V_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_h_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+void av1_h_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, H_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, D45_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_d135_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_d135_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, D135_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_d117_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_d117_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, D117_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_d153_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_d153_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, D153_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_d207_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_d207_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, D207_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_d63_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_d63_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, D63_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
-void vp10_tm_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
- const uint8_t *left) {
+void av1_tm_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
__m128i prm[5];
GetIntraFilterParams(bs, TM_PRED, &prm[0]);
FilterPrediction(above, left, bs, prm, dst, stride);
}
// ============== High Bit Depth ==============
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE int HighbdGetMeanValue4x4(const uint16_t *above,
const uint16_t *left, const int bd,
__m128i *params) {
@@ -809,83 +807,83 @@
HighbdGeneratePrediction(above, left, bs, bd, prm, meanValue, dst, stride);
}
-void vp10_highbd_dc_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_dc_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, DC_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_v_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_v_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, V_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_h_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_h_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, H_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_d45_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d45_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, D45_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_d135_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d135_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, D135_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_d117_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d117_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, D117_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_d153_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d153_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, D153_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_d207_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d207_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, D207_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_d63_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_d63_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, D63_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-void vp10_highbd_tm_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
- const uint16_t *left, int bd) {
+void av1_highbd_tm_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
__m128i prm[5];
GetIntraFilterParams(bs, TM_PRED, &prm[0]);
HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
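All ten low-bitdepth wrappers in this hunk (and the ten highbd ones below them) differ only in the PREDICTION_MODE constant they pass to GetIntraFilterParams(); the signature churn above is just clang-format rewrapping after the shorter av1_ prefix. A hedged sketch of how an X-macro could fold the pattern — illustrative only, the tree keeps them written out:

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Prototypes for the file-local helpers every wrapper calls; the real
     * mode argument is a PREDICTION_MODE from av1/common/enums.h. */
    void GetIntraFilterParams(int bs, int mode, __m128i *prm);
    void FilterPrediction(const uint8_t *above, const uint8_t *left, int bs,
                          __m128i *prm, uint8_t *dst, ptrdiff_t stride);

    #define DEFINE_FILTER_PRED(name, mode)                              \
      void av1_##name##_filter_predictor_sse4_1(                        \
          uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, \
          const uint8_t *left) {                                        \
        __m128i prm[5];                                                 \
        GetIntraFilterParams(bs, (mode), &prm[0]);                      \
        FilterPrediction(above, left, bs, prm, dst, stride);            \
      }

    DEFINE_FILTER_PRED(dc, 0 /* DC_PRED */)
    DEFINE_FILTER_PRED(v, 1 /* V_PRED */)
    /* ...h, d45, d135, d117, d153, d207, d63, tm follow the same pattern. */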
diff --git a/av1/decoder/bitreader.h b/av1/decoder/bitreader.h
index 75d6aa4..aaf1bb8 100644
--- a/av1/decoder/bitreader.h
+++ b/av1/decoder/bitreader.h
@@ -11,28 +11,28 @@
/* The purpose of this header is to provide compile time pluggable bit reader
* implementations with a common interface. */
-#ifndef VPX10_DECODER_BITREADER_H_
-#define VPX10_DECODER_BITREADER_H_
+#ifndef AOM10_DECODER_BITREADER_H_
+#define AOM10_DECODER_BITREADER_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
#if CONFIG_ANS
#include "av1/common/ans.h"
-#include "aom/vp8dx.h" // for vp10_decrypt_cb
-#define vp10_reader struct AnsDecoder
-#define vp10_reader_has_error ans_reader_has_error
-#define vp10_read uabs_read
-#define vp10_read_bit uabs_read_bit
-#define vp10_read_literal uabs_read_literal
-#define vp10_read_tree uabs_read_tree
+#include "aom/aomdx.h" // for av1_decrypt_cb
+#define aom_reader struct AnsDecoder
+#define aom_reader_has_error ans_reader_has_error
+#define aom_read uabs_read
+#define aom_read_bit uabs_read_bit
+#define aom_read_literal uabs_read_literal
+#define aom_read_tree uabs_read_tree
#else
#include "aom_dsp/bitreader.h"
-#define vp10_reader vpx_reader
-#define vp10_reader_has_error vpx_reader_has_error
-#define vp10_read vpx_read
-#define vp10_read_bit vpx_read_bit
-#define vp10_read_literal vpx_read_literal
-#define vp10_read_tree vpx_read_tree
+#define aom_reader aom_reader
+#define aom_reader_has_error aom_reader_has_error
+#define aom_read aom_read
+#define aom_read_bit aom_read_bit
+#define aom_read_literal aom_read_literal
+#define aom_read_tree aom_read_tree
#endif
-#endif // VPX10_DECODER_BITREADER_H_
+#endif // AOM10_DECODER_BITREADER_H_
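One quirk to note in the non-ANS branch above: the mechanical rename turned #define vp10_read vpx_read into #define aom_read aom_read, and likewise for the other five symbols. These self-referential defines are well-formed C — a macro name is not rescanned inside its own replacement — so each identifier resolves to the aom_dsp/bitreader.h definition exactly as if the defines were absent. A minimal demonstration:

    #include <stdio.h>

    #define aom_read_bit aom_read_bit /* self-reference: rescanning stops */

    static int aom_read_bit(void) { return 1; } /* still declares fine */

    int main(void) {
      printf("%d\n", aom_read_bit()); /* prints 1; the macro is an identity */
      return 0;
    }

Presumably the six defines survive only so the two branches of the #if stay parallel; a later cleanup could drop them outright.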
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 0f90c20..1f1f358 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -11,20 +11,19 @@
#include <assert.h>
#include <stdlib.h> // qsort()
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
#include "aom_dsp/bitreader_buffer.h"
#include "av1/decoder/bitreader.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/mem_ops.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
#if CONFIG_CLPF
@@ -51,9 +50,9 @@
#include "av1/decoder/decoder.h"
#include "av1/decoder/dsubexp.h"
-#define MAX_VPX_HEADER_SIZE 80
+#define MAX_AV1_HEADER_SIZE 80
-static int is_compound_reference_allowed(const VP10_COMMON *cm) {
+static int is_compound_reference_allowed(const AV1_COMMON *cm) {
int i;
if (frame_is_intra_only(cm)) return 0;
for (i = 1; i < INTER_REFS_PER_FRAME; ++i)
@@ -62,7 +61,7 @@
return 0;
}
-static void setup_compound_reference_mode(VP10_COMMON *cm) {
+static void setup_compound_reference_mode(AV1_COMMON *cm) {
#if CONFIG_EXT_REFS
cm->comp_fwd_ref[0] = LAST_FRAME;
cm->comp_fwd_ref[1] = LAST2_FRAME;
@@ -94,51 +93,51 @@
return len != 0 && len <= (size_t)(end - start);
}
-static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
- const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max));
+static int decode_unsigned_max(struct aom_read_bit_buffer *rb, int max) {
+ const int data = aom_rb_read_literal(rb, get_unsigned_bits(max));
return data > max ? max : data;
}
-static TX_MODE read_tx_mode(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ? TX_MODE_SELECT : vpx_rb_read_literal(rb, 2);
+static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb) {
+ return aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
}
-static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
- vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+ av1_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}
-static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i;
#if CONFIG_REF_MV
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->newmv_prob[i]);
+ av1_diff_update_prob(r, &fc->newmv_prob[i]);
for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->zeromv_prob[i]);
+ av1_diff_update_prob(r, &fc->zeromv_prob[i]);
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->refmv_prob[i]);
+ av1_diff_update_prob(r, &fc->refmv_prob[i]);
for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->drl_prob[i]);
+ av1_diff_update_prob(r, &fc->drl_prob[i]);
#if CONFIG_EXT_INTER
- vp10_diff_update_prob(r, &fc->new2mv_prob);
+ av1_diff_update_prob(r, &fc->new2mv_prob);
#endif // CONFIG_EXT_INTER
#else
int j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
for (j = 0; j < INTER_MODES - 1; ++j)
- vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+ av1_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
#endif
}
#if CONFIG_EXT_INTER
-static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (j = 0; j < INTER_MODE_CONTEXTS; ++j) {
for (i = 0; i < INTER_COMPOUND_MODES - 1; ++i) {
- vp10_diff_update_prob(r, &fc->inter_compound_mode_probs[j][i]);
+ av1_diff_update_prob(r, &fc->inter_compound_mode_probs[j][i]);
}
}
}
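decode_unsigned_max() above reads a value known to lie in [0, max] as get_unsigned_bits(max) raw bits, then clamps rather than erroring when the codeword exceeds max. A hedged sketch of the bit-width helper it leans on (aom_dsp implements the real one via get_msb):

    /* Smallest bit count whose codewords cover 0..max;
     * e.g. max = 5 -> 3 bits (codewords 0..7; 6 and 7 clamp to 5). */
    static int get_unsigned_bits_sketch(unsigned int max) {
      int bits = 0;
      while (max) {
        ++bits;
        max >>= 1;
      }
      return bits;
    }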
@@ -146,28 +145,28 @@
#endif // CONFIG_EXT_INTER
static REFERENCE_MODE read_frame_reference_mode(
- const VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+ const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
if (is_compound_reference_allowed(cm)) {
- return vpx_rb_read_bit(rb)
+ return aom_rb_read_bit(rb)
? REFERENCE_MODE_SELECT
- : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
+ : (aom_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
} else {
return SINGLE_REFERENCE;
}
}
-static void read_frame_reference_mode_probs(VP10_COMMON *cm, vp10_reader *r) {
+static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
FRAME_CONTEXT *const fc = cm->fc;
int i, j;
if (cm->reference_mode == REFERENCE_MODE_SELECT)
for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->comp_inter_prob[i]);
+ av1_diff_update_prob(r, &fc->comp_inter_prob[i]);
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < REF_CONTEXTS; ++i) {
for (j = 0; j < (SINGLE_REFS - 1); ++j) {
- vp10_diff_update_prob(r, &fc->single_ref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->single_ref_prob[i][j]);
}
}
}
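The syntax read by read_frame_reference_mode() above is a two-bit prefix code, consulted only when compound prediction is allowed at all: 1 -> REFERENCE_MODE_SELECT, 01 -> COMPOUND_REFERENCE, 00 -> SINGLE_REFERENCE. Restated as a tiny helper:

    typedef enum {
      SINGLE_REFERENCE_S, COMPOUND_REFERENCE_S, REFERENCE_MODE_SELECT_S
    } RefModeSketch;

    /* bit1/bit2 in read order; bit2 is only consumed when bit1 == 0,
     * matching the nested aom_rb_read_bit() calls above. */
    static RefModeSketch ref_mode_from_bits(int bit1, int bit2) {
      if (bit1) return REFERENCE_MODE_SELECT_S;
      return bit2 ? COMPOUND_REFERENCE_S : SINGLE_REFERENCE_S;
    }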
@@ -176,29 +175,29 @@
for (i = 0; i < REF_CONTEXTS; ++i) {
#if CONFIG_EXT_REFS
for (j = 0; j < (FWD_REFS - 1); ++j)
- vp10_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
for (j = 0; j < (BWD_REFS - 1); ++j)
- vp10_diff_update_prob(r, &fc->comp_bwdref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->comp_bwdref_prob[i][j]);
#else
for (j = 0; j < (COMP_REFS - 1); ++j)
- vp10_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
#endif // CONFIG_EXT_REFS
}
}
}
-static void update_mv_probs(vpx_prob *p, int n, vp10_reader *r) {
+static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
int i;
- for (i = 0; i < n; ++i) vp10_diff_update_prob(r, &p[i]);
+ for (i = 0; i < n; ++i) av1_diff_update_prob(r, &p[i]);
}
-static void read_mv_probs(nmv_context *ctx, int allow_hp, vp10_reader *r) {
+static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
int i, j;
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
#if CONFIG_REF_MV
- vp10_diff_update_prob(r, &ctx->zero_rmv);
+ av1_diff_update_prob(r, &ctx->zero_rmv);
#endif
for (i = 0; i < 2; ++i) {
@@ -238,16 +237,16 @@
inv_txfm_param.eob = eob;
inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
inv_txfm_param.bd = xd->bd;
highbd_inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (eob == 1) {
dqcoeff[0] = 0;
@@ -272,7 +271,7 @@
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif // CONFIG_ANS
MB_MODE_INFO *const mbmi,
int plane, int row, int col,
@@ -287,22 +286,21 @@
if (mbmi->sb_type < BLOCK_8X8)
if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
- vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
- pd->dst.stride, dst, pd->dst.stride, col, row,
- plane);
+ av1_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+ pd->dst.stride, dst, pd->dst.stride, col, row, plane);
if (!mbmi->skip) {
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
const scan_order *sc = get_scan(tx_size, tx_type, 0);
- const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
- tx_type, r, mbmi->segment_id);
+ const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+ tx_type, r, mbmi->segment_id);
inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
eob);
}
}
#if CONFIG_VAR_TX
-static void decode_reconstruct_tx(MACROBLOCKD *const xd, vp10_reader *r,
+static void decode_reconstruct_tx(MACROBLOCKD *const xd, aom_reader *r,
MB_MODE_INFO *const mbmi, int plane,
BLOCK_SIZE plane_bsize, int block,
int blk_row, int blk_col, TX_SIZE tx_size,
@@ -330,8 +328,8 @@
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, plane_tx_size);
const scan_order *sc = get_scan(plane_tx_size, tx_type, 1);
const int eob =
- vp10_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
- tx_type, r, mbmi->segment_id);
+ av1_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
+ tx_type, r, mbmi->segment_id);
inverse_transform_block(
xd, plane, tx_type, plane_tx_size,
&pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col],
@@ -363,7 +361,7 @@
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif
int segment_id, int plane, int row, int col,
TX_SIZE tx_size) {
@@ -372,8 +370,8 @@
int block_idx = (row << 1) + col;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
const scan_order *sc = get_scan(tx_size, tx_type, 1);
- const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
- tx_type, r, segment_id);
+ const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+ tx_type, r, segment_id);
inverse_transform_block(xd, plane, tx_type, tx_size,
&pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
@@ -385,8 +383,8 @@
static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl,
int n4_hl) {
// get minimum log2 num4x4s dimension
- const int x = VPXMIN(n4_wl, n4_hl);
- return VPXMIN(txsize_sqr_map[mbmi->tx_size], x);
+ const int x = AOMMIN(n4_wl, n4_hl);
+ return AOMMIN(txsize_sqr_map[mbmi->tx_size], x);
}
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
@@ -409,7 +407,7 @@
}
}
-static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static MB_MODE_INFO *set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int bw, int bh, int x_mis, int y_mis, int bwl,
int bhl) {
@@ -439,12 +437,12 @@
// as they are always compared to values that are in 1/8th pel units
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
return &xd->mi[0]->mbmi;
}
#if CONFIG_SUPERTX
-static MB_MODE_INFO *set_offsets_extend(VP10_COMMON *const cm,
+static MB_MODE_INFO *set_offsets_extend(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
const TileInfo *const tile,
BLOCK_SIZE bsize_pred, int mi_row_pred,
@@ -471,10 +469,9 @@
return &xd->mi[0]->mbmi;
}
-static MB_MODE_INFO *set_mb_offsets(VP10_COMMON *const cm,
- MACROBLOCKD *const xd, BLOCK_SIZE bsize,
- int mi_row, int mi_col, int bw, int bh,
- int x_mis, int y_mis) {
+static MB_MODE_INFO *set_mb_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int bw, int bh, int x_mis, int y_mis) {
const int offset = mi_row * cm->mi_stride + mi_col;
const TileInfo *const tile = &xd->tile;
int x, y;
@@ -489,7 +486,7 @@
return &xd->mi[0]->mbmi;
}
-static void set_offsets_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_offsets_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const TileInfo *const tile, BLOCK_SIZE bsize,
int mi_row, int mi_col) {
const int bw = num_8x8_blocks_wide_lookup[bsize];
@@ -505,16 +502,16 @@
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
}
-static void set_param_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_param_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int txfm, int skip) {
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int bh = num_8x8_blocks_high_lookup[bsize];
- const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
- const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
const int offset = mi_row * cm->mi_stride + mi_col;
int x, y;
@@ -534,21 +531,21 @@
#endif
}
-static void set_ref(VP10_COMMON *const cm, MACROBLOCKD *const xd, int idx,
+static void set_ref(AV1_COMMON *const cm, MACROBLOCKD *const xd, int idx,
int mi_row, int mi_col) {
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
xd->block_refs[idx] = ref_buffer;
- if (!vp10_is_valid_scale(&ref_buffer->sf))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (!av1_is_valid_scale(&ref_buffer->sf))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid scale factors");
- vp10_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
- &ref_buffer->sf);
+ av1_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
+ &ref_buffer->sf);
xd->corrupted |= ref_buffer->buf->corrupted;
}
static void dec_predict_b_extend(
- VP10Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
+ AV1Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
int block, int mi_row_ori, int mi_col_ori, int mi_row_pred, int mi_col_pred,
int mi_row_top, int mi_col_top, uint8_t *dst_buf[3], int dst_stride[3],
BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, int b_sub8x8, int bextend) {
@@ -564,7 +561,7 @@
const int mi_width_top = num_8x8_blocks_wide_lookup[bsize_top];
const int mi_height_top = num_8x8_blocks_high_lookup[bsize_top];
MB_MODE_INFO *mbmi;
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top ||
mi_row_pred >= mi_row_top + mi_height_top ||
@@ -596,21 +593,21 @@
(c >> xd->plane[2].subsampling_x);
if (!b_sub8x8)
- vp10_build_inter_predictors_sb_extend(xd,
+ av1_build_inter_predictors_sb_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, bsize_pred);
+ mi_row_pred, mi_col_pred, bsize_pred);
else
- vp10_build_inter_predictors_sb_sub8x8_extend(xd,
+ av1_build_inter_predictors_sb_sub8x8_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred,
- bsize_pred, block);
+ mi_row_pred, mi_col_pred,
+ bsize_pred, block);
}
-static void dec_extend_dir(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void dec_extend_dir(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int block,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
@@ -678,7 +675,7 @@
}
}
-static void dec_extend_all(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void dec_extend_all(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int block,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
@@ -701,13 +698,12 @@
mi_row_top, mi_col_top, dst_buf, dst_stride, 7);
}
-static void dec_predict_sb_complex(VP10Decoder *const pbi,
- MACROBLOCKD *const xd,
+static void dec_predict_sb_complex(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
uint8_t *dst_buf[3], int dst_stride[3]) {
- const VP10_COMMON *const cm = &pbi->common;
+ const AV1_COMMON *const cm = &pbi->common;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
const BLOCK_SIZE subsize = get_subsize(bsize, partition);
@@ -725,7 +721,7 @@
int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -748,7 +744,7 @@
dst_buf3[0] = tmp_buf3;
dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
@@ -793,7 +789,7 @@
// weighted average to smooth the boundary
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
0);
@@ -827,7 +823,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -856,7 +852,7 @@
// Smooth
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
0);
@@ -890,7 +886,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
@@ -943,22 +939,22 @@
if (bsize == BLOCK_8X8 && i != 0)
continue; // Skip <4x4 chroma smoothing
if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
if (mi_row + hbs < cm->mi_rows) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
}
} else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -993,13 +989,13 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
@@ -1034,13 +1030,13 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
@@ -1073,7 +1069,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
@@ -1081,7 +1077,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
@@ -1114,7 +1110,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -1122,7 +1118,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
@@ -1133,14 +1129,13 @@
}
}
-static void set_segment_id_supertx(const VP10_COMMON *const cm,
- const int mi_row, const int mi_col,
- const BLOCK_SIZE bsize) {
+static void set_segment_id_supertx(const AV1_COMMON *const cm, const int mi_row,
+ const int mi_col, const BLOCK_SIZE bsize) {
const struct segmentation *seg = &cm->seg;
const int miw =
- VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+ AOMMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
const int mih =
- VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
+ AOMMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
const int mi_offset = mi_row * cm->mi_stride + mi_col;
MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
int r, c;
@@ -1153,7 +1148,7 @@
for (r = 0; r < mih; r++)
for (c = 0; c < miw; c++)
seg_id_supertx =
- VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
+ AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
}
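The VPXMIN -> AOMMIN swaps in this hunk all serve one edge-clamping idiom: a block that overhangs the right or bottom frame edge must only walk the mode-info entries that actually exist (x_mis/y_mis above, miw/mih here). Restated standalone, with AOMMIN re-declared locally for illustration:

    #define AOMMIN(x, y) (((x) < (y)) ? (x) : (y))

    /* Visible 8x8 mode-info units of a block_mis-wide block starting at
     * mi_pos in a frame that is frame_mis units wide (or tall). */
    static int visible_mi_units(int block_mis, int frame_mis, int mi_pos) {
      return AOMMIN(block_mis, frame_mis - mi_pos);
    }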
@@ -1164,21 +1159,21 @@
}
#endif // CONFIG_SUPERTX
-static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r,
+ int mi_row, int mi_col, aom_reader *r,
#if CONFIG_EXT_PARTITION_TYPES
PARTITION_TYPE partition,
#endif // CONFIG_EXT_PARTITION_TYPES
BLOCK_SIZE bsize, int bwl, int bhl) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int less8x8 = bsize < BLOCK_8X8;
const int bw = 1 << (bwl - 1);
const int bh = 1 << (bhl - 1);
- const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
- const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
#if CONFIG_SUPERTX
MB_MODE_INFO *mbmi;
@@ -1191,22 +1186,21 @@
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
- vp10_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis,
- y_mis);
+ av1_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis, y_mis);
#else
MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
y_mis, bwl, bhl);
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
- vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
+ av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
#endif // CONFIG_SUPERTX
if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
const BLOCK_SIZE uv_subsize =
ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
if (uv_subsize == BLOCK_INVALID)
- vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
"Invalid block size.");
}
@@ -1214,7 +1208,7 @@
mbmi->segment_id_supertx = MAX_SEGMENTS;
if (supertx_enabled) {
- xd->corrupted |= vp10_reader_has_error(r);
+ xd->corrupted |= aom_reader_has_error(r);
return;
}
#endif // CONFIG_SUPERTX
@@ -1226,7 +1220,7 @@
int plane;
for (plane = 0; plane <= 1; ++plane) {
if (mbmi->palette_mode_info.palette_size[plane])
- vp10_decode_palette_tokens(xd, plane, r);
+ av1_decode_palette_tokens(xd, plane, r);
}
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -1254,17 +1248,16 @@
}
} else {
// Prediction
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col,
- VPXMAX(bsize, BLOCK_8X8));
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, AOMMAX(bsize, BLOCK_8X8));
#if CONFIG_OBMC
if (mbmi->motion_variation == OBMC_CAUSAL) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
@@ -1274,7 +1267,7 @@
int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
assert(mbmi->sb_type >= BLOCK_8X8);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -1284,25 +1277,23 @@
dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst_buf1[0] = tmp_buf1;
dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
dst_buf2[0] = tmp_buf2;
dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
- dst_width1, dst_height1,
- dst_stride1);
- vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
- dst_width2, dst_height2, dst_stride2);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
- mi_col);
- vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
- dst_stride1, dst_buf2, dst_stride2);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_width1, dst_height1, dst_stride1);
+ av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+ dst_width2, dst_height2, dst_stride2);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1, dst_buf2, dst_stride2);
}
#endif // CONFIG_OBMC
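The OBMC branch above doubles its temporary buffers under CONFIG_AOM_HIGHBITDEPTH because each sample is a uint16_t, then routes them through uint8_t* interfaces via CONVERT_TO_BYTEPTR. A hedged restatement of that pointer trick as aom_ports/mem.h defines it in this era (the buffer address must be even, which DECLARE_ALIGNED guarantees):

    #include <stdint.h>

    /* A uint16_t buffer travels through uint8_t* code with its address
     * halved, so byte-sized pointer arithmetic steps one 16-bit sample at
     * a time; CONVERT_TO_SHORTPTR doubles it back at the point of use. */
    #define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
    #define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))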
@@ -1319,7 +1310,7 @@
#if CONFIG_VAR_TX
// TODO(jingning): This can be simplified for decoder performance.
const BLOCK_SIZE plane_bsize =
- get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+ get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
int bw = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
int bh = num_4x4_blocks_high_txsize_lookup[max_tx_size];
@@ -1385,7 +1376,7 @@
}
}
- xd->corrupted |= vp10_reader_has_error(r);
+ xd->corrupted |= aom_reader_has_error(r);
}
static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, int mi_row,
@@ -1416,31 +1407,31 @@
}
#endif // !CONFIG_EXT_PARTITION_TYPES
-static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col, vp10_reader *r,
+static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col, aom_reader *r,
int has_rows, int has_cols,
#if CONFIG_EXT_PARTITION_TYPES
BLOCK_SIZE bsize,
#endif
int bsl) {
const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
- const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+ const aom_prob *const probs = cm->fc->partition_prob[ctx];
FRAME_COUNTS *counts = xd->counts;
PARTITION_TYPE p;
if (has_rows && has_cols)
#if CONFIG_EXT_PARTITION_TYPES
if (bsize <= BLOCK_8X8)
- p = (PARTITION_TYPE)vp10_read_tree(r, vp10_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
else
- p = (PARTITION_TYPE)vp10_read_tree(r, vp10_ext_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_ext_partition_tree, probs);
#else
- p = (PARTITION_TYPE)vp10_read_tree(r, vp10_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
#endif // CONFIG_EXT_PARTITION_TYPES
else if (!has_rows && has_cols)
- p = vp10_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
+ p = aom_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
else if (has_rows && !has_cols)
- p = vp10_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
+ p = aom_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
else
p = PARTITION_SPLIT;
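In read_partition() above, only interior blocks (has_rows && has_cols) consult the full partition tree; at a frame edge the choice collapses to a single probability-coded bit, and off both edges to nothing at all. The edge cases, restated:

    typedef enum {
      PARTITION_NONE_S, PARTITION_HORZ_S, PARTITION_VERT_S, PARTITION_SPLIT_S
    } PartitionSketch;

    /* coded_bit is the one bit read in the edge cases; interior blocks read
     * the full tree instead and never reach this helper. */
    static PartitionSketch edge_partition(int has_rows, int has_cols,
                                          int coded_bit) {
      if (!has_rows && has_cols) /* overhangs the bottom edge */
        return coded_bit ? PARTITION_SPLIT_S : PARTITION_HORZ_S;
      if (has_rows && !has_cols) /* overhangs the right edge */
        return coded_bit ? PARTITION_SPLIT_S : PARTITION_VERT_S;
      return PARTITION_SPLIT_S;  /* off both edges: forced, no bit read */
    }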
@@ -1450,13 +1441,13 @@
}
#if CONFIG_SUPERTX
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
- vp10_reader *r) {
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
- const int ctx = vp10_get_skip_context(xd);
- const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
+ const int ctx = av1_get_skip_context(xd);
+ const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
return skip;
@@ -1465,13 +1456,13 @@
#endif // CONFIG_SUPERTX
// TODO(slavarnway): eliminate bsize and subsize in future commits
-static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
- int mi_row, int mi_col, vp10_reader *r,
+ int mi_row, int mi_col, aom_reader *r,
BLOCK_SIZE bsize, int n4x4_l2) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int n8x8_l2 = n4x4_l2 - 1;
const int num_8x8_wh = 1 << n8x8_l2;
const int hbs = num_8x8_wh >> 1;
@@ -1503,7 +1494,7 @@
bsize <= MAX_SUPERTX_BLOCK_SIZE && !supertx_enabled && !xd->lossless[0]) {
const int supertx_context = partition_supertx_context_lookup[partition];
supertx_enabled =
- vp10_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
+ aom_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
if (xd->counts)
xd->counts->supertx[supertx_context][supertx_size][supertx_enabled]++;
#if CONFIG_VAR_TX
@@ -1704,21 +1695,21 @@
if (get_ext_tx_types(supertx_size, bsize, 1) > 1) {
int eset = get_ext_tx_set(supertx_size, bsize, 1);
if (eset > 0) {
- txfm = vp10_read_tree(r, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][supertx_size]);
+ txfm = aom_read_tree(r, av1_ext_tx_inter_tree[eset],
+ cm->fc->inter_ext_tx_prob[eset][supertx_size]);
if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
}
}
#else
if (supertx_size < TX_32X32) {
- txfm = vp10_read_tree(r, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[supertx_size]);
+ txfm = aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[supertx_size]);
if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm];
}
#endif // CONFIG_EXT_TX
}
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
for (i = 0; i < MAX_MB_PLANE; i++) {
dst_buf[i] = xd->plane[i].dst.buf;
dst_stride[i] = xd->plane[i].dst.stride;
@@ -1800,7 +1791,7 @@
if (bsize == BLOCK_64X64) {
if (cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
- vpx_read_literal(r, DERING_REFINEMENT_BITS);
+ aom_read_literal(r, DERING_REFINEMENT_BITS);
} else {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
0;
@@ -1813,26 +1804,26 @@
#if !CONFIG_ANS
static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end,
const size_t read_size,
- struct vpx_internal_error_info *error_info,
- vp10_reader *r, vpx_decrypt_cb decrypt_cb,
+ struct aom_internal_error_info *error_info,
+ aom_reader *r, aom_decrypt_cb decrypt_cb,
void *decrypt_state) {
// Validate the calculated partition length. If the buffer
// described by the partition can't be fully read, then restrict
// it to the portion that can be (for EC mode) or throw an error.
if (!read_is_valid(data, read_size, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
- if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
- vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+ if (aom_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
+ aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
}
#else
static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
const size_t read_size,
- struct vpx_internal_error_info *error_info,
+ struct aom_internal_error_info *error_info,
struct AnsDecoder *const ans,
- vpx_decrypt_cb decrypt_cb,
+ aom_decrypt_cb decrypt_cb,
void *decrypt_state) {
(void)decrypt_cb;
(void)decrypt_state;
@@ -1840,104 +1831,103 @@
// described by the partition can't be fully read, then restrict
// it to the portion that can be (for EC mode) or throw an error.
if (!read_is_valid(data, read_size, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
if (read_size > INT_MAX || ans_read_init(ans, data, (int)read_size))
- vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate token decoder %d", 1);
}
#endif
-static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
- vp10_reader *r) {
+static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
+ aom_reader *r) {
int i, j, k, l, m;
- if (vp10_read_bit(r))
+ if (aom_read_bit(r))
for (i = 0; i < PLANE_TYPES; ++i)
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
- vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+ av1_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}
-static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
- vp10_reader *r) {
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
read_coef_probs_common(fc->coef_probs[tx_size], r);
#if CONFIG_ANS
- vp10_coef_pareto_cdfs(fc);
+ av1_coef_pareto_cdfs(fc);
#endif // CONFIG_ANS
}
-static void setup_segmentation(VP10_COMMON *const cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_segmentation(AV1_COMMON *const cm,
+ struct aom_read_bit_buffer *rb) {
struct segmentation *const seg = &cm->seg;
int i, j;
seg->update_map = 0;
seg->update_data = 0;
- seg->enabled = vpx_rb_read_bit(rb);
+ seg->enabled = aom_rb_read_bit(rb);
if (!seg->enabled) return;
// Segmentation map update
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
seg->update_map = 1;
} else {
- seg->update_map = vpx_rb_read_bit(rb);
+ seg->update_map = aom_rb_read_bit(rb);
}
if (seg->update_map) {
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
seg->temporal_update = 0;
} else {
- seg->temporal_update = vpx_rb_read_bit(rb);
+ seg->temporal_update = aom_rb_read_bit(rb);
}
}
// Segmentation data update
- seg->update_data = vpx_rb_read_bit(rb);
+ seg->update_data = aom_rb_read_bit(rb);
if (seg->update_data) {
- seg->abs_delta = vpx_rb_read_bit(rb);
+ seg->abs_delta = aom_rb_read_bit(rb);
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
for (i = 0; i < MAX_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
int data = 0;
- const int feature_enabled = vpx_rb_read_bit(rb);
+ const int feature_enabled = aom_rb_read_bit(rb);
if (feature_enabled) {
- vp10_enable_segfeature(seg, i, j);
- data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j));
- if (vp10_is_segfeature_signed(j))
- data = vpx_rb_read_bit(rb) ? -data : data;
+ av1_enable_segfeature(seg, i, j);
+ data = decode_unsigned_max(rb, av1_seg_feature_data_max(j));
+ if (av1_is_segfeature_signed(j))
+ data = aom_rb_read_bit(rb) ? -data : data;
}
- vp10_set_segdata(seg, i, j, data);
+ av1_set_segdata(seg, i, j, data);
}
}
}
}
#if CONFIG_LOOP_RESTORATION
-static void setup_restoration(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_restoration(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int i;
RestorationInfo *rsi = &cm->rst_info;
int ntiles;
- if (vpx_rb_read_bit(rb)) {
- if (vpx_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
rsi->restoration_type = RESTORE_BILATERAL;
- ntiles = vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width,
- cm->height);
- rsi->bilateral_level = (int *)vpx_realloc(
+ ntiles =
+ av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+ rsi->bilateral_level = (int *)aom_realloc(
rsi->bilateral_level, sizeof(*rsi->bilateral_level) * ntiles);
assert(rsi->bilateral_level != NULL);
for (i = 0; i < ntiles; ++i) {
- if (vpx_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
rsi->bilateral_level[i] =
- vpx_rb_read_literal(rb, vp10_bilateral_level_bits(cm));
+ aom_rb_read_literal(rb, av1_bilateral_level_bits(cm));
} else {
rsi->bilateral_level[i] = -1;
}
@@ -1945,30 +1935,30 @@
} else {
rsi->restoration_type = RESTORE_WIENER;
ntiles =
- vp10_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
- rsi->wiener_level = (int *)vpx_realloc(
+ av1_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
+ rsi->wiener_level = (int *)aom_realloc(
rsi->wiener_level, sizeof(*rsi->wiener_level) * ntiles);
assert(rsi->wiener_level != NULL);
- rsi->vfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+ rsi->vfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
rsi->vfilter, sizeof(*rsi->vfilter) * ntiles);
assert(rsi->vfilter != NULL);
- rsi->hfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+ rsi->hfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
rsi->hfilter, sizeof(*rsi->hfilter) * ntiles);
assert(rsi->hfilter != NULL);
for (i = 0; i < ntiles; ++i) {
- rsi->wiener_level[i] = vpx_rb_read_bit(rb);
+ rsi->wiener_level[i] = aom_rb_read_bit(rb);
if (rsi->wiener_level[i]) {
- rsi->vfilter[i][0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
+ rsi->vfilter[i][0] = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
WIENER_FILT_TAP0_MINV;
- rsi->vfilter[i][1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
+ rsi->vfilter[i][1] = aom_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
WIENER_FILT_TAP1_MINV;
- rsi->vfilter[i][2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
+ rsi->vfilter[i][2] = aom_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
WIENER_FILT_TAP2_MINV;
- rsi->hfilter[i][0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
+ rsi->hfilter[i][0] = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
WIENER_FILT_TAP0_MINV;
- rsi->hfilter[i][1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
+ rsi->hfilter[i][1] = aom_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
WIENER_FILT_TAP1_MINV;
- rsi->hfilter[i][2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
+ rsi->hfilter[i][2] = aom_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
WIENER_FILT_TAP2_MINV;
} else {
rsi->vfilter[i][0] = rsi->vfilter[i][1] = rsi->vfilter[i][2] = 0;
@@ -1982,60 +1972,60 @@
}
#endif // CONFIG_LOOP_RESTORATION
-static void setup_loopfilter(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
struct loopfilter *lf = &cm->lf;
- lf->filter_level = vpx_rb_read_literal(rb, 6);
- lf->sharpness_level = vpx_rb_read_literal(rb, 3);
+ lf->filter_level = aom_rb_read_literal(rb, 6);
+ lf->sharpness_level = aom_rb_read_literal(rb, 3);
// Read in loop filter deltas applied at the MB level based on mode or ref
// frame.
lf->mode_ref_delta_update = 0;
- lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb);
+ lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
if (lf->mode_ref_delta_enabled) {
- lf->mode_ref_delta_update = vpx_rb_read_bit(rb);
+ lf->mode_ref_delta_update = aom_rb_read_bit(rb);
if (lf->mode_ref_delta_update) {
int i;
for (i = 0; i < TOTAL_REFS_PER_FRAME; i++)
- if (vpx_rb_read_bit(rb))
- lf->ref_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);
+ if (aom_rb_read_bit(rb))
+ lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
- if (vpx_rb_read_bit(rb))
- lf->mode_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);
+ if (aom_rb_read_bit(rb))
+ lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
}
}
}
#if CONFIG_CLPF
-static void setup_clpf(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
- cm->clpf = vpx_rb_read_literal(rb, 1);
+static void setup_clpf(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ cm->clpf = aom_rb_read_literal(rb, 1);
}
#endif
#if CONFIG_DERING
-static void setup_dering(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
- cm->dering_level = vpx_rb_read_literal(rb, DERING_LEVEL_BITS);
+static void setup_dering(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ cm->dering_level = aom_rb_read_literal(rb, DERING_LEVEL_BITS);
}
#endif // CONFIG_DERING
-static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ? vpx_rb_read_inv_signed_literal(rb, 6) : 0;
+static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
+ return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0;
}
-static void setup_quantization(VP10_COMMON *const cm,
- struct vpx_read_bit_buffer *rb) {
- cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS);
+static void setup_quantization(AV1_COMMON *const cm,
+ struct aom_read_bit_buffer *rb) {
+ cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
cm->y_dc_delta_q = read_delta_q(rb);
cm->uv_dc_delta_q = read_delta_q(rb);
cm->uv_ac_delta_q = read_delta_q(rb);
cm->dequant_bit_depth = cm->bit_depth;
#if CONFIG_AOM_QM
- cm->using_qmatrix = vpx_rb_read_bit(rb);
+ cm->using_qmatrix = aom_rb_read_bit(rb);
if (cm->using_qmatrix) {
- cm->min_qmlevel = vpx_rb_read_literal(rb, QM_LEVEL_BITS);
- cm->max_qmlevel = vpx_rb_read_literal(rb, QM_LEVEL_BITS);
+ cm->min_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
+ cm->max_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
} else {
cm->min_qmlevel = 0;
cm->max_qmlevel = 0;
@@ -2043,7 +2033,7 @@
#endif
}
-static void setup_segmentation_dequant(VP10_COMMON *const cm) {
+static void setup_segmentation_dequant(AV1_COMMON *const cm) {
// Build y/uv dequant values based on segmentation.
int i = 0;
#if CONFIG_AOM_QM
@@ -2060,14 +2050,14 @@
#endif // CONFIG_NEW_QUANT
if (cm->seg.enabled) {
for (i = 0; i < MAX_SEGMENTS; ++i) {
- const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
+ const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex);
cm->y_dequant[i][0] =
- vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
- cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+ av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
cm->uv_dequant[i][0] =
- vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
cm->uv_dequant[i][1] =
- vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_AOM_QM
lossless = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2086,10 +2076,10 @@
#if CONFIG_NEW_QUANT
for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (b = 0; b < COEF_BANDS; ++b) {
- vp10_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
- cm->y_dequant_nuq[i][dq][b], NULL, dq);
- vp10_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
- cm->uv_dequant_nuq[i][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
+ cm->y_dequant_nuq[i][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
+ cm->uv_dequant_nuq[i][dq][b], NULL, dq);
}
}
#endif // CONFIG_NEW_QUANT
@@ -2098,13 +2088,12 @@
const int qindex = cm->base_qindex;
// When segmentation is disabled, only the first value is used. The
// remaining are don't cares.
- cm->y_dequant[0][0] =
- vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
- cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+ cm->y_dequant[0][0] = av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[0][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
cm->uv_dequant[0][0] =
- vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
cm->uv_dequant[0][1] =
- vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_AOM_QM
lossless = qindex == 0 && cm->y_dc_delta_q == 0 && cm->uv_dc_delta_q == 0 &&
cm->uv_ac_delta_q == 0;
@@ -2122,41 +2111,41 @@
#if CONFIG_NEW_QUANT
for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (b = 0; b < COEF_BANDS; ++b) {
- vp10_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
- cm->y_dequant_nuq[0][dq][b], NULL, dq);
- vp10_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
- cm->uv_dequant_nuq[0][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
+ cm->y_dequant_nuq[0][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
+ cm->uv_dequant_nuq[0][dq][b], NULL, dq);
}
}
#endif // CONFIG_NEW_QUANT
}
}
-static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ? SWITCHABLE
- : vpx_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
+static INTERP_FILTER read_interp_filter(struct aom_read_bit_buffer *rb) {
+ return aom_rb_read_bit(rb) ? SWITCHABLE
+ : aom_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
}
-static void setup_render_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
cm->render_width = cm->width;
cm->render_height = cm->height;
- if (vpx_rb_read_bit(rb))
- vp10_read_frame_size(rb, &cm->render_width, &cm->render_height);
+ if (aom_rb_read_bit(rb))
+ av1_read_frame_size(rb, &cm->render_width, &cm->render_height);
}
-static void resize_mv_buffer(VP10_COMMON *cm) {
- vpx_free(cm->cur_frame->mvs);
+static void resize_mv_buffer(AV1_COMMON *cm) {
+ aom_free(cm->cur_frame->mvs);
cm->cur_frame->mi_rows = cm->mi_rows;
cm->cur_frame->mi_cols = cm->mi_cols;
CHECK_MEM_ERROR(cm, cm->cur_frame->mvs,
- (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+ (MV_REF *)aom_calloc(cm->mi_rows * cm->mi_cols,
sizeof(*cm->cur_frame->mvs)));
}
-static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
+static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Dimensions of %dx%d beyond allowed size of %dx%d.",
width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
@@ -2166,16 +2155,16 @@
const int new_mi_cols =
ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
- // Allocations in vp10_alloc_context_buffers() depend on individual
+ // Allocations in av1_alloc_context_buffers() depend on individual
// dimensions as well as the overall size.
if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
- if (vp10_alloc_context_buffers(cm, width, height))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ if (av1_alloc_context_buffers(cm, width, height))
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate context buffers");
} else {
- vp10_set_mb_mi(cm, width, height);
+ av1_set_mb_mi(cm, width, height);
}
- vp10_init_context_buffers(cm);
+ av1_init_context_buffers(cm);
cm->width = width;
cm->height = height;
}
@@ -2185,25 +2174,25 @@
}
}
-static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int width, height;
BufferPool *const pool = cm->buffer_pool;
- vp10_read_frame_size(rb, &width, &height);
+ av1_read_frame_size(rb, &width, &height);
resize_context_buffers(cm, width, height);
setup_render_size(cm, rb);
lock_buffer_pool(pool);
- if (vpx_realloc_frame_buffer(
+ if (aom_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
unlock_buffer_pool(pool);
@@ -2217,22 +2206,22 @@
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
-static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
+static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
int ref_xss, int ref_yss,
- vpx_bit_depth_t this_bit_depth,
+ aom_bit_depth_t this_bit_depth,
int this_xss, int this_yss) {
return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
ref_yss == this_yss;
}
-static void setup_frame_size_with_refs(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_frame_size_with_refs(AV1_COMMON *cm,
+ struct aom_read_bit_buffer *rb) {
int width, height;
int found = 0, i;
int has_valid_ref_frame = 0;
BufferPool *const pool = cm->buffer_pool;
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
- if (vpx_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
width = buf->y_crop_width;
height = buf->y_crop_height;
@@ -2244,12 +2233,12 @@
}
if (!found) {
- vp10_read_frame_size(rb, &width, &height);
+ av1_read_frame_size(rb, &width, &height);
setup_render_size(cm, rb);
}
if (width <= 0 || height <= 0)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Invalid frame size");
// Check to make sure at least one of frames that this frame references
@@ -2261,7 +2250,7 @@
ref_frame->buf->y_crop_height, width, height);
}
if (!has_valid_ref_frame)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Referenced frame has invalid size");
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
@@ -2269,24 +2258,24 @@
ref_frame->buf->subsampling_x,
ref_frame->buf->subsampling_y, cm->bit_depth,
cm->subsampling_x, cm->subsampling_y))
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Referenced frame has incompatible color format");
}
resize_context_buffers(cm, width, height);
lock_buffer_pool(pool);
- if (vpx_realloc_frame_buffer(
+ if (aom_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
unlock_buffer_pool(pool);
@@ -2300,27 +2289,27 @@
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
-static void read_tile_info(VP10Decoder *const pbi,
- struct vpx_read_bit_buffer *const rb) {
- VP10_COMMON *const cm = &pbi->common;
+static void read_tile_info(AV1Decoder *const pbi,
+ struct aom_read_bit_buffer *const rb) {
+ AV1_COMMON *const cm = &pbi->common;
#if CONFIG_EXT_TILE
// Read the tile width/height
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128) {
- cm->tile_width = vpx_rb_read_literal(rb, 5) + 1;
- cm->tile_height = vpx_rb_read_literal(rb, 5) + 1;
+ cm->tile_width = aom_rb_read_literal(rb, 5) + 1;
+ cm->tile_height = aom_rb_read_literal(rb, 5) + 1;
} else
#endif // CONFIG_EXT_PARTITION
{
- cm->tile_width = vpx_rb_read_literal(rb, 6) + 1;
- cm->tile_height = vpx_rb_read_literal(rb, 6) + 1;
+ cm->tile_width = aom_rb_read_literal(rb, 6) + 1;
+ cm->tile_height = aom_rb_read_literal(rb, 6) + 1;
}
cm->tile_width <<= cm->mib_size_log2;
cm->tile_height <<= cm->mib_size_log2;
- cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
- cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
+ cm->tile_width = AOMMIN(cm->tile_width, cm->mi_cols);
+ cm->tile_height = AOMMIN(cm->tile_height, cm->mi_rows);
// Get the number of tiles
cm->tile_cols = 1;
@@ -2331,25 +2320,25 @@
if (cm->tile_cols * cm->tile_rows > 1) {
// Read the number of bytes used to store tile size
- pbi->tile_col_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
- pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+ pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1;
+ pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
}
#else
int min_log2_tile_cols, max_log2_tile_cols, max_ones;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
// columns
max_ones = max_log2_tile_cols - min_log2_tile_cols;
cm->log2_tile_cols = min_log2_tile_cols;
- while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
+ while (max_ones-- && aom_rb_read_bit(rb)) cm->log2_tile_cols++;
if (cm->log2_tile_cols > 6)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Invalid number of tile columns");
// rows
- cm->log2_tile_rows = vpx_rb_read_bit(rb);
- if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
+ cm->log2_tile_rows = aom_rb_read_bit(rb);
+ if (cm->log2_tile_rows) cm->log2_tile_rows += aom_rb_read_bit(rb);
cm->tile_cols = 1 << cm->log2_tile_cols;
cm->tile_rows = 1 << cm->log2_tile_rows;
@@ -2365,7 +2354,7 @@
// tile size magnitude
if (cm->tile_rows > 1 || cm->tile_cols > 1) {
- pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+ pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
}
#endif // CONFIG_EXT_TILE
}
@@ -2384,8 +2373,8 @@
// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
- struct vpx_internal_error_info *error_info,
- const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ struct aom_internal_error_info *error_info,
+ const uint8_t **data, aom_decrypt_cb decrypt_cb,
void *decrypt_state,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
int tile_size_bytes, int col, int row) {
@@ -2395,7 +2384,7 @@
const uint8_t *copy_data = NULL;
if (!read_is_valid(*data, tile_size_bytes, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
if (decrypt_cb) {
uint8_t be_data[4];
@@ -2421,7 +2410,7 @@
*data += tile_size_bytes;
if (size > (size_t)(data_end - *data))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile size");
if (size > 0) {
@@ -2438,9 +2427,9 @@
}
static void get_tile_buffers(
- VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
+ AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
const int have_tiles = tile_cols * tile_rows > 1;
@@ -2459,11 +2448,11 @@
const uint8_t *tile_col_data_end[MAX_TILE_COLS];
const uint8_t *const data_start = data;
- const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+ const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
const int single_row = pbi->dec_tile_row >= 0;
const int tile_rows_start = single_row ? dec_tile_row : 0;
const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
- const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+ const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
const int single_col = pbi->dec_tile_col >= 0;
const int tile_cols_start = single_col ? dec_tile_col : 0;
const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2529,14 +2518,14 @@
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
const int tile_size_bytes, int is_last,
- struct vpx_internal_error_info *error_info,
- const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ struct aom_internal_error_info *error_info,
+ const uint8_t **data, aom_decrypt_cb decrypt_cb,
void *decrypt_state, TileBufferDec *const buf) {
size_t size;
if (!is_last) {
if (!read_is_valid(*data, 4, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
if (decrypt_cb) {
@@ -2549,7 +2538,7 @@
*data += tile_size_bytes;
if (size > (size_t)(data_end - *data))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile size");
} else {
size = data_end - *data;
@@ -2562,9 +2551,9 @@
}
static void get_tile_buffers(
- VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
+ AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
int r, c;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
@@ -2581,20 +2570,20 @@
}
#endif // CONFIG_EXT_TILE
-static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
- VP10_COMMON *const cm = &pbi->common;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ AV1_COMMON *const cm = &pbi->common;
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
const int n_tiles = tile_cols * tile_rows;
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
#if CONFIG_EXT_TILE
- const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+ const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
const int single_row = pbi->dec_tile_row >= 0;
const int tile_rows_start = single_row ? dec_tile_row : 0;
const int tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
- const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+ const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
const int single_col = pbi->dec_tile_col >= 0;
const int tile_cols_start = single_col ? dec_tile_col : 0;
const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2617,10 +2606,10 @@
if (cm->lf.filter_level && !cm->skip_loop_filter &&
pbi->lf_worker.data1 == NULL) {
CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
- vpx_memalign(32, sizeof(LFWorkerData)));
- pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker;
+ aom_memalign(32, sizeof(LFWorkerData)));
+ pbi->lf_worker.hook = (AVxWorkerHook)av1_loop_filter_worker;
if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Loop filter thread creation failed");
}
}
@@ -2629,8 +2618,8 @@
LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
// Be sure to sync as we might be resuming after a failed frame decode.
winterface->sync(&pbi->lf_worker);
- vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
- pbi->mb.plane);
+ av1_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
+ pbi->mb.plane);
}
assert(tile_rows <= MAX_TILE_ROWS);
@@ -2639,9 +2628,9 @@
get_tile_buffers(pbi, data, data_end, tile_buffers);
if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
- vpx_free(pbi->tile_data);
+ aom_free(pbi->tile_data);
CHECK_MEM_ERROR(cm, pbi->tile_data,
- vpx_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
+ aom_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
pbi->allocated_tiles = n_tiles;
}
@@ -2658,8 +2647,8 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
? &cm->counts
: NULL;
- vp10_zero(td->dqcoeff);
- vp10_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
+ av1_zero(td->dqcoeff);
+ av1_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
#if !CONFIG_ANS
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
&td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
@@ -2667,7 +2656,7 @@
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
&td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
#endif
- vp10_init_macroblockd(cm, &td->xd, td->dqcoeff);
+ av1_init_macroblockd(cm, &td->xd, td->dqcoeff);
td->xd.plane[0].color_index_map = td->color_index_map[0];
td->xd.plane[1].color_index_map = td->color_index_map[1];
}
@@ -2678,21 +2667,21 @@
int mi_row = 0;
TileInfo tile_info;
- vp10_tile_set_row(&tile_info, cm, row);
+ av1_tile_set_row(&tile_info, cm, row);
for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
TileData *const td = pbi->tile_data + tile_cols * row + col;
- vp10_tile_set_col(&tile_info, cm, col);
+ av1_tile_set_col(&tile_info, cm, col);
- vp10_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
+ av1_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
mi_row += cm->mib_size) {
int mi_col;
- vp10_zero_left_context(&td->xd);
+ av1_zero_left_context(&td->xd);
for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
mi_col += cm->mib_size) {
@@ -2705,18 +2694,18 @@
}
pbi->mb.corrupted |= td->xd.corrupted;
if (pbi->mb.corrupted)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Failed to decode tile data");
#if CONFIG_ENTROPY
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
if ((mi_row + MI_SIZE) %
(MI_SIZE *
- VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+ AOMMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
0 &&
mi_row + MI_SIZE < cm->mi_rows &&
cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
- vp10_partial_adapt_probs(cm, mi_row, mi_col);
+ av1_partial_adapt_probs(cm, mi_row, mi_col);
++cm->coef_probs_update_idx;
}
}
@@ -2730,7 +2719,7 @@
// Loopfilter one tile row.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
- const int lf_start = VPXMAX(0, tile_info.mi_row_start - cm->mib_size);
+ const int lf_start = AOMMAX(0, tile_info.mi_row_start - cm->mib_size);
const int lf_end = tile_info.mi_row_end - cm->mib_size;
// Delay the loopfilter if the first tile row is only
@@ -2753,14 +2742,14 @@
// After loopfiltering, the last 7 row pixels in each superblock row may
// still be changed by the longest loopfilter of the next superblock row.
if (cm->frame_parallel_decode)
- vp10_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
+ av1_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
#endif // !CONFIG_VAR_TX
}
#if CONFIG_VAR_TX
// Loopfilter the whole frame.
- vp10_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
- cm->lf.filter_level, 0, 0);
+ av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
+ cm->lf.filter_level, 0, 0);
#else
// Loopfilter remaining rows in the frame.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
@@ -2773,16 +2762,16 @@
#endif // CONFIG_VAR_TX
#if CONFIG_CLPF
if (cm->clpf && !cm->skip_loop_filter)
- vp10_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
+ av1_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
#endif
#if CONFIG_DERING
if (cm->dering_level && !cm->skip_loop_filter) {
- vp10_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
+ av1_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
}
#endif // CONFIG_DERING
if (cm->frame_parallel_decode)
- vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX);
+ av1_frameworker_broadcast(pbi->cur_buf, INT_MAX);
#if CONFIG_EXT_TILE
if (n_tiles == 1) {
@@ -2790,7 +2779,7 @@
return data_end;
#else
// Find the end of the single tile buffer
- return vpx_reader_find_end(&pbi->tile_data->bit_reader);
+ return aom_reader_find_end(&pbi->tile_data->bit_reader);
#endif // CONFIG_ANS
} else {
// Return the end of the last tile buffer
@@ -2803,7 +2792,7 @@
{
// Get last tile data.
TileData *const td = pbi->tile_data + tile_cols * tile_rows - 1;
- return vpx_reader_find_end(&td->bit_reader);
+ return aom_reader_find_end(&td->bit_reader);
}
#endif // CONFIG_ANS
#endif // CONFIG_EXT_TILE
@@ -2811,8 +2800,8 @@
static int tile_worker_hook(TileWorkerData *const tile_data,
const TileInfo *const tile) {
- VP10Decoder *const pbi = tile_data->pbi;
- const VP10_COMMON *const cm = &pbi->common;
+ AV1Decoder *const pbi = tile_data->pbi;
+ const AV1_COMMON *const cm = &pbi->common;
int mi_row, mi_col;
if (setjmp(tile_data->error_info.jmp)) {
@@ -2824,11 +2813,11 @@
tile_data->error_info.setjmp = 1;
tile_data->xd.error_info = &tile_data->error_info;
- vp10_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
+ av1_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += cm->mib_size) {
- vp10_zero_left_context(&tile_data->xd);
+ av1_zero_left_context(&tile_data->xd);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += cm->mib_size) {
@@ -2850,20 +2839,20 @@
return (int)(buf2->size - buf1->size);
}
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
- VP10_COMMON *const cm = &pbi->common;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ AV1_COMMON *const cm = &pbi->common;
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
- const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols);
+ const int num_workers = AOMMIN(pbi->max_threads & ~1, tile_cols);
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
#if CONFIG_EXT_TILE
- const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+ const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
const int single_row = pbi->dec_tile_row >= 0;
const int tile_rows_start = single_row ? dec_tile_row : 0;
const int tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
- const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+ const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
const int single_col = pbi->dec_tile_col >= 0;
const int tile_cols_start = single_col ? dec_tile_col : 0;
const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2895,22 +2884,22 @@
if (pbi->num_tile_workers == 0) {
const int num_threads = pbi->max_threads & ~1;
CHECK_MEM_ERROR(cm, pbi->tile_workers,
- vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
+ aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
// Ensure tile data offsets will be properly aligned. This may fail on
// platforms without DECLARE_ALIGNED().
assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
CHECK_MEM_ERROR(
cm, pbi->tile_worker_data,
- vpx_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
+ aom_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
- vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
+ aom_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
for (i = 0; i < num_threads; ++i) {
- VPxWorker *const worker = &pbi->tile_workers[i];
+ AVxWorker *const worker = &pbi->tile_workers[i];
++pbi->num_tile_workers;
winterface->init(worker);
if (i < num_threads - 1 && !winterface->reset(worker)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Tile decoder thread creation failed");
}
}
@@ -2918,9 +2907,9 @@
// Reset tile decoding hook
for (i = 0; i < num_workers; ++i) {
- VPxWorker *const worker = &pbi->tile_workers[i];
+ AVxWorker *const worker = &pbi->tile_workers[i];
winterface->sync(worker);
- worker->hook = (VPxWorkerHook)tile_worker_hook;
+ worker->hook = (AVxWorkerHook)tile_worker_hook;
worker->data1 = &pbi->tile_worker_data[i];
worker->data2 = &pbi->tile_worker_info[i];
}
@@ -2929,7 +2918,7 @@
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
for (i = 0; i < num_workers; ++i) {
TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
- vp10_zero(twd->counts);
+ av1_zero(twd->counts);
}
}
@@ -2950,7 +2939,7 @@
int group_start;
for (group_start = tile_cols_start; group_start < tile_cols_end;
group_start += num_workers) {
- const int group_end = VPXMIN(group_start + num_workers, tile_cols);
+ const int group_end = AOMMIN(group_start + num_workers, tile_cols);
const TileBufferDec largest = tile_buffers[tile_row][group_start];
memmove(&tile_buffers[tile_row][group_start],
&tile_buffers[tile_row][group_start + 1],
@@ -2964,7 +2953,7 @@
for (i = 0; i < num_workers && tile_col < tile_cols_end;
++i, ++tile_col) {
TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
- VPxWorker *const worker = &pbi->tile_workers[i];
+ AVxWorker *const worker = &pbi->tile_workers[i];
TileWorkerData *const twd = (TileWorkerData *)worker->data1;
TileInfo *const tile_info = (TileInfo *)worker->data2;
@@ -2975,9 +2964,9 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
? &twd->counts
: NULL;
- vp10_zero(twd->dqcoeff);
- vp10_tile_init(tile_info, cm, tile_row, buf->col);
- vp10_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
+ av1_zero(twd->dqcoeff);
+ av1_tile_init(tile_info, cm, tile_row, buf->col);
+ av1_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
#if !CONFIG_ANS
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
&twd->bit_reader, pbi->decrypt_cb,
@@ -2987,7 +2976,7 @@
&twd->bit_reader, pbi->decrypt_cb,
pbi->decrypt_state);
#endif // CONFIG_ANS
- vp10_init_macroblockd(cm, &twd->xd, twd->dqcoeff);
+ av1_init_macroblockd(cm, &twd->xd, twd->dqcoeff);
twd->xd.plane[0].color_index_map = twd->color_index_map[0];
twd->xd.plane[1].color_index_map = twd->color_index_map[1];
@@ -3007,9 +2996,9 @@
// Sync all workers
for (; i > 0; --i) {
- VPxWorker *const worker = &pbi->tile_workers[i - 1];
+ AVxWorker *const worker = &pbi->tile_workers[i - 1];
// TODO(jzern): The tile may have specific error data associated with
- // its vpx_internal_error_info which could be propagated to the main
+ // its aom_internal_error_info which could be propagated to the main
// info in cm. Additionally once the threads have been synced and an
// error is detected, there's no point in continuing to decode tiles.
pbi->mb.corrupted |= !winterface->sync(worker);
@@ -3021,7 +3010,7 @@
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
for (i = 0; i < num_workers; ++i) {
TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
- vp10_accumulate_frame_counts(cm, &twd->counts);
+ av1_accumulate_frame_counts(cm, &twd->counts);
}
}
@@ -3036,42 +3025,42 @@
{
TileWorkerData *const twd =
(TileWorkerData *)pbi->tile_workers[final_worker].data1;
- return vpx_reader_find_end(&twd->bit_reader);
+ return aom_reader_find_end(&twd->bit_reader);
}
#endif // CONFIG_ANS
#endif // CONFIG_EXT_TILE
}
static void error_handler(void *data) {
- VP10_COMMON *const cm = (VP10_COMMON *)data;
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+ AV1_COMMON *const cm = (AV1_COMMON *)data;
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
}
-static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
+ struct aom_read_bit_buffer *rb) {
if (cm->profile >= PROFILE_2) {
- cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
-#if CONFIG_VP9_HIGHBITDEPTH
+ cm->bit_depth = aom_rb_read_bit(rb) ? AOM_BITS_12 : AOM_BITS_10;
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 1;
#endif
} else {
- cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VP9_HIGHBITDEPTH
+ cm->bit_depth = AOM_BITS_8;
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 0;
#endif
}
- cm->color_space = vpx_rb_read_literal(rb, 3);
- if (cm->color_space != VPX_CS_SRGB) {
+ cm->color_space = aom_rb_read_literal(rb, 3);
+ if (cm->color_space != AOM_CS_SRGB) {
// [16,235] (including xvycc) vs [0,255] range
- cm->color_range = vpx_rb_read_bit(rb);
+ cm->color_range = aom_rb_read_bit(rb);
if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
- cm->subsampling_x = vpx_rb_read_bit(rb);
- cm->subsampling_y = vpx_rb_read_bit(rb);
+ cm->subsampling_x = aom_rb_read_bit(rb);
+ cm->subsampling_y = aom_rb_read_bit(rb);
if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"4:2:0 color not supported in profile 1 or 3");
- if (vpx_rb_read_bit(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (aom_rb_read_bit(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Reserved bit set");
} else {
cm->subsampling_y = cm->subsampling_x = 1;
@@ -3081,19 +3070,19 @@
// Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
// 4:2:2 or 4:4:0 chroma sampling is not allowed.
cm->subsampling_y = cm->subsampling_x = 0;
- if (vpx_rb_read_bit(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (aom_rb_read_bit(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Reserved bit set");
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"4:4:4 color not supported in profile 0 or 2");
}
}
}
-static size_t read_uncompressed_header(VP10Decoder *pbi,
- struct vpx_read_bit_buffer *rb) {
- VP10_COMMON *const cm = &pbi->common;
+static size_t read_uncompressed_header(AV1Decoder *pbi,
+ struct aom_read_bit_buffer *rb) {
+ AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = pool->frame_bufs;
@@ -3111,31 +3100,31 @@
cm->is_reference_frame = 1;
#endif // CONFIG_EXT_REFS
- if (vpx_rb_read_literal(rb, 2) != VPX_FRAME_MARKER)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (aom_rb_read_literal(rb, 2) != AOM_FRAME_MARKER)
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame marker");
- cm->profile = vp10_read_profile(rb);
-#if CONFIG_VP9_HIGHBITDEPTH
+ cm->profile = av1_read_profile(rb);
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->profile >= MAX_PROFILES)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Unsupported bitstream profile");
#else
if (cm->profile >= PROFILE_2)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Unsupported bitstream profile");
#endif
- cm->show_existing_frame = vpx_rb_read_bit(rb);
+ cm->show_existing_frame = aom_rb_read_bit(rb);
if (cm->show_existing_frame) {
// Show an existing frame directly.
- const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)];
+ const int frame_to_show = cm->ref_frame_map[aom_rb_read_literal(rb, 3)];
lock_buffer_pool(pool);
if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
unlock_buffer_pool(pool);
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Buffer %d does not contain a decoded frame",
frame_to_show);
}
@@ -3154,13 +3143,13 @@
return 0;
}
- cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
- cm->show_frame = vpx_rb_read_bit(rb);
- cm->error_resilient_mode = vpx_rb_read_bit(rb);
+ cm->frame_type = (FRAME_TYPE)aom_rb_read_bit(rb);
+ cm->show_frame = aom_rb_read_bit(rb);
+ cm->error_resilient_mode = aom_rb_read_bit(rb);
if (cm->frame_type == KEY_FRAME) {
- if (!vp10_read_sync_code(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (!av1_read_sync_code(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
read_bitdepth_colorspace_sampling(cm, rb);
@@ -3177,43 +3166,43 @@
pbi->need_resync = 0;
}
if (frame_is_intra_only(cm))
- cm->allow_screen_content_tools = vpx_rb_read_bit(rb);
+ cm->allow_screen_content_tools = aom_rb_read_bit(rb);
} else {
- cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
+ cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
if (cm->error_resilient_mode) {
cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
} else {
if (cm->intra_only) {
- cm->reset_frame_context = vpx_rb_read_bit(rb)
+ cm->reset_frame_context = aom_rb_read_bit(rb)
? RESET_FRAME_CONTEXT_ALL
: RESET_FRAME_CONTEXT_CURRENT;
} else {
- cm->reset_frame_context = vpx_rb_read_bit(rb)
+ cm->reset_frame_context = aom_rb_read_bit(rb)
? RESET_FRAME_CONTEXT_CURRENT
: RESET_FRAME_CONTEXT_NONE;
if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
- cm->reset_frame_context = vpx_rb_read_bit(rb)
+ cm->reset_frame_context = aom_rb_read_bit(rb)
? RESET_FRAME_CONTEXT_ALL
: RESET_FRAME_CONTEXT_CURRENT;
}
}
if (cm->intra_only) {
- if (!vp10_read_sync_code(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (!av1_read_sync_code(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
read_bitdepth_colorspace_sampling(cm, rb);
- pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+ pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
setup_frame_size(cm, rb);
if (pbi->need_resync) {
memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
pbi->need_resync = 0;
}
} else if (pbi->need_resync != 1) { /* Skip if need resync */
- pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+ pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
#if CONFIG_EXT_REFS
if (!pbi->refresh_frame_flags) {
@@ -3224,35 +3213,35 @@
#endif // CONFIG_EXT_REFS
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
- const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
+ const int ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
const int idx = cm->ref_frame_map[ref];
RefBuffer *const ref_frame = &cm->frame_refs[i];
ref_frame->idx = idx;
ref_frame->buf = &frame_bufs[idx].buf;
- cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb);
+ cm->ref_frame_sign_bias[LAST_FRAME + i] = aom_rb_read_bit(rb);
}
setup_frame_size_with_refs(cm, rb);
- cm->allow_high_precision_mv = vpx_rb_read_bit(rb);
+ cm->allow_high_precision_mv = aom_rb_read_bit(rb);
cm->interp_filter = read_interp_filter(rb);
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_buf = &cm->frame_refs[i];
-#if CONFIG_VP9_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, ref_buf->buf->y_crop_width,
ref_buf->buf->y_crop_height, cm->width, cm->height,
cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, ref_buf->buf->y_crop_width,
ref_buf->buf->y_crop_height, cm->width, cm->height);
#endif
}
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
#endif
get_frame_new_buffer(cm)->color_space = cm->color_space;
@@ -3261,22 +3250,22 @@
get_frame_new_buffer(cm)->render_height = cm->render_height;
if (pbi->need_resync) {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Keyframe / intra-only frame required to reset decoder"
" state");
}
if (!cm->error_resilient_mode) {
- cm->refresh_frame_context = vpx_rb_read_bit(rb)
+ cm->refresh_frame_context = aom_rb_read_bit(rb)
? REFRESH_FRAME_CONTEXT_FORWARD
: REFRESH_FRAME_CONTEXT_BACKWARD;
} else {
cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
}
- // This flag will be overridden by the call to vp10_setup_past_independence
+ // This flag will be overridden by the call to av1_setup_past_independence
// below, forcing the use of context 0 for those frame types.
- cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
+ cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
// Generate next_ref_frame_map.
lock_buffer_pool(pool);
@@ -3304,10 +3293,10 @@
pbi->hold_ref_buf = 1;
if (frame_is_intra_only(cm) || cm->error_resilient_mode)
- vp10_setup_past_independence(cm);
+ av1_setup_past_independence(cm);
#if CONFIG_EXT_PARTITION
- set_sb_size(cm, vpx_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
+ set_sb_size(cm, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
#else
set_sb_size(cm, BLOCK_64X64);
#endif // CONFIG_EXT_PARTITION
@@ -3323,12 +3312,12 @@
setup_restoration(cm, rb);
#endif // CONFIG_LOOP_RESTORATION
setup_quantization(cm, rb);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->bd = (int)cm->bit_depth;
#endif
#if CONFIG_ENTROPY
- vp10_default_coef_probs(cm);
+ av1_default_coef_probs(cm);
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
@@ -3343,7 +3332,7 @@
int i;
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = cm->seg.enabled
- ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
: cm->base_qindex;
xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -3356,36 +3345,36 @@
cm->reference_mode = read_frame_reference_mode(cm, rb);
read_tile_info(pbi, rb);
- sz = vpx_rb_read_literal(rb, 16);
+ sz = aom_rb_read_literal(rb, 16);
if (sz == 0)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Invalid header size");
return sz;
}
#if CONFIG_EXT_TX
-static void read_ext_tx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_inter_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < num_ext_tx_set_inter[s] - 1; ++j)
- vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[s][i][j]);
+ av1_diff_update_prob(r, &fc->inter_ext_tx_prob[s][i][j]);
}
}
}
for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_intra_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < INTRA_MODES; ++j)
for (k = 0; k < num_ext_tx_set_intra[s] - 1; ++k)
- vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[s][i][j][k]);
+ av1_diff_update_prob(r, &fc->intra_ext_tx_prob[s][i][j][k]);
}
}
}
@@ -3393,31 +3382,31 @@
#else
-static void read_ext_tx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
for (k = 0; k < TX_TYPES - 1; ++k)
- vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
+ av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
}
}
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
- vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
+ av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
}
}
}
#endif // CONFIG_EXT_TX
#if CONFIG_SUPERTX
-static void read_supertx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
for (j = 1; j < TX_SIZES; ++j) {
- vp10_diff_update_prob(r, &fc->supertx_prob[i][j]);
+ av1_diff_update_prob(r, &fc->supertx_prob[i][j]);
}
}
}
@@ -3426,44 +3415,44 @@
#if CONFIG_GLOBAL_MOTION
static void read_global_motion_params(Global_Motion_Params *params,
- vpx_prob *probs, vp10_reader *r) {
+ aom_prob *probs, aom_reader *r) {
GLOBAL_MOTION_TYPE gmtype =
- vp10_read_tree(r, vp10_global_motion_types_tree, probs);
+ aom_read_tree(r, av1_global_motion_types_tree, probs);
params->gmtype = gmtype;
params->motion_params.wmtype = gm_to_trans_type(gmtype);
switch (gmtype) {
case GLOBAL_ZERO: break;
case GLOBAL_AFFINE:
params->motion_params.wmmat[4] =
- (vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ (aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR);
params->motion_params.wmmat[5] =
- vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR +
(1 << WARPEDMODEL_PREC_BITS);
// fallthrough intended
case GLOBAL_ROTZOOM:
params->motion_params.wmmat[2] =
- (vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ (aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR) +
(1 << WARPEDMODEL_PREC_BITS);
params->motion_params.wmmat[3] =
- vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR;
// fallthrough intended
case GLOBAL_TRANSLATION:
params->motion_params.wmmat[0] =
- vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
GM_TRANS_DECODE_FACTOR;
params->motion_params.wmmat[1] =
- vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
GM_TRANS_DECODE_FACTOR;
break;
default: assert(0);
}
}
-static void read_global_motion(VP10_COMMON *cm, vp10_reader *r) {
+static void read_global_motion(AV1_COMMON *cm, aom_reader *r) {
int frame;
memset(cm->global_motion, 0, sizeof(cm->global_motion));
for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
@@ -3473,24 +3462,24 @@
}
#endif // CONFIG_GLOBAL_MOTION
-static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
+static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
size_t partition_size) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
#if CONFIG_SUPERTX
MACROBLOCKD *const xd = &pbi->mb;
#endif
FRAME_CONTEXT *const fc = cm->fc;
- vp10_reader r;
+ aom_reader r;
int k, i, j;
#if !CONFIG_ANS
- if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb,
+ if (aom_reader_init(&r, data, partition_size, pbi->decrypt_cb,
pbi->decrypt_state))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
#else
if (ans_read_init(&r, data, partition_size))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate compressed header ANS decoder");
#endif // !CONFIG_ANS
@@ -3498,56 +3487,56 @@
for (i = 0; i < TX_SIZES - 1; ++i)
for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
for (k = 0; k < i + 1; ++k)
- vp10_diff_update_prob(&r, &fc->tx_size_probs[i][j][k]);
+ av1_diff_update_prob(&r, &fc->tx_size_probs[i][j][k]);
}
read_coef_probs(fc, cm->tx_mode, &r);
#if CONFIG_VAR_TX
for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
- vp10_diff_update_prob(&r, &fc->txfm_partition_prob[k]);
+ av1_diff_update_prob(&r, &fc->txfm_partition_prob[k]);
#endif
for (k = 0; k < SKIP_CONTEXTS; ++k)
- vp10_diff_update_prob(&r, &fc->skip_probs[k]);
+ av1_diff_update_prob(&r, &fc->skip_probs[k]);
if (cm->seg.enabled && cm->seg.update_map) {
if (cm->seg.temporal_update) {
for (k = 0; k < PREDICTION_PROBS; k++)
- vp10_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
+ av1_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
}
for (k = 0; k < MAX_SEGMENTS - 1; k++)
- vp10_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
+ av1_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
}
for (j = 0; j < INTRA_MODES; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
#if CONFIG_EXT_PARTITION_TYPES
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[0][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[0][i]);
for (j = 1; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < EXT_PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
#else
for (j = 0; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
for (j = 0; j < INTRA_FILTERS - 1; ++j)
- vp10_diff_update_prob(&r, &fc->intra_filter_probs[i][j]);
+ av1_diff_update_prob(&r, &fc->intra_filter_probs[i][j]);
#endif // CONFIG_EXT_INTRA
if (frame_is_intra_only(cm)) {
- vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+ av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
for (k = 0; k < INTRA_MODES; k++)
for (j = 0; j < INTRA_MODES; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
+ av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
@@ -3560,23 +3549,23 @@
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
if (is_interintra_allowed_bsize_group(i)) {
- vp10_diff_update_prob(&r, &fc->interintra_prob[i]);
+ av1_diff_update_prob(&r, &fc->interintra_prob[i]);
}
}
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
for (j = 0; j < INTERINTRA_MODES - 1; j++)
- vp10_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
+ av1_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
}
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i)) {
- vp10_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
+ av1_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
}
}
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interinter_wedge_used(i)) {
- vp10_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
+ av1_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
}
}
}
@@ -3585,14 +3574,14 @@
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i) {
for (j = 0; j < MOTION_VARIATIONS - 1; ++j)
- vp10_diff_update_prob(&r, &fc->motvar_prob[i][j]);
+ av1_diff_update_prob(&r, &fc->motvar_prob[i][j]);
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
+ av1_diff_update_prob(&r, &fc->intra_inter_prob[i]);
if (cm->reference_mode != SINGLE_REFERENCE)
setup_compound_reference_mode(cm);
@@ -3601,7 +3590,7 @@
for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
@@ -3618,7 +3607,7 @@
#endif // CONFIG_GLOBAL_MOTION
}
- return vp10_reader_has_error(&r);
+ return aom_reader_has_error(&r);
}
#ifdef NDEBUG
@@ -3626,9 +3615,9 @@
#else // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
-static void debug_check_frame_counts(const VP10_COMMON *const cm) {
+static void debug_check_frame_counts(const AV1_COMMON *const cm) {
FRAME_COUNTS zero_counts;
- vp10_zero(zero_counts);
+ av1_zero(zero_counts);
assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
cm->error_resilient_mode);
assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
@@ -3689,14 +3678,14 @@
}
#endif // NDEBUG
-static struct vpx_read_bit_buffer *init_read_bit_buffer(
- VP10Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
- const uint8_t *data_end, uint8_t clear_data[MAX_VPX_HEADER_SIZE]) {
+static struct aom_read_bit_buffer *init_read_bit_buffer(
+ AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
+ const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]) {
rb->bit_offset = 0;
rb->error_handler = error_handler;
rb->error_handler_data = &pbi->common;
if (pbi->decrypt_cb) {
- const int n = (int)VPXMIN(MAX_VPX_HEADER_SIZE, data_end - data);
+ const int n = (int)AOMMIN(MAX_AV1_HEADER_SIZE, data_end - data);
pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
rb->bit_buffer = clear_data;
rb->bit_buffer_end = clear_data + n;
@@ -3709,32 +3698,32 @@
//------------------------------------------------------------------------------
-int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb) {
- return vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 &&
- vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 &&
- vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb) {
+ return aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_0 &&
+ aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_1 &&
+ aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_2;
}
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
- int *height) {
- *width = vpx_rb_read_literal(rb, 16) + 1;
- *height = vpx_rb_read_literal(rb, 16) + 1;
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+ int *height) {
+ *width = aom_rb_read_literal(rb, 16) + 1;
+ *height = aom_rb_read_literal(rb, 16) + 1;
}
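For reference, the size syntax codes each dimension minus one in 16 bits, so legal frame sizes span 1..65536 per axis. Below is a minimal sketch of the matching writer side, assuming a symmetric aom_write_bit_buffer API (aom_wb_write_literal) and the post-rename header path; it is a sketch, not code from this diff.

#include "aom_dsp/bitwriter_buffer.h"  /* assumed post-rename path */

static void write_frame_size_sketch(struct aom_write_bit_buffer *wb,
                                    int width, int height) {
  /* 1..65536 maps onto the 16-bit range 0..65535 */
  aom_wb_write_literal(wb, width - 1, 16);
  aom_wb_write_literal(wb, height - 1, 16);
}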
-BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) {
- int profile = vpx_rb_read_bit(rb);
- profile |= vpx_rb_read_bit(rb) << 1;
- if (profile > 2) profile += vpx_rb_read_bit(rb);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
+ int profile = aom_rb_read_bit(rb);
+ profile |= aom_rb_read_bit(rb) << 1;
+ if (profile > 2) profile += aom_rb_read_bit(rb);
return (BITSTREAM_PROFILE)profile;
}
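The profile bits arrive LSB-first, and a third bit is read only when the first two are both set. A worked enumeration of the mapping implemented above:

/* b0 b1 (b2) -> profile
 *  0  0      -> 0
 *  1  0      -> 1
 *  0  1      -> 2
 *  1  1  0   -> 3
 *  1  1  1   -> 4 (out of range; callers are expected to reject it)
 */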
-void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
- const uint8_t *data_end, const uint8_t **p_data_end) {
- VP10_COMMON *const cm = &pbi->common;
+void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end) {
+ AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- struct vpx_read_bit_buffer rb;
+ struct aom_read_bit_buffer rb;
int context_updated = 0;
- uint8_t clear_data[MAX_VPX_HEADER_SIZE];
+ uint8_t clear_data[MAX_AV1_HEADER_SIZE];
const size_t first_partition_size = read_uncompressed_header(
pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
@@ -3747,7 +3736,7 @@
// showing a frame directly
#if CONFIG_EXT_REFS
if (cm->show_existing_frame)
- *p_data_end = data + vpx_rb_bytes_read(&rb);
+ *p_data_end = data + aom_rb_bytes_read(&rb);
else
#endif // CONFIG_EXT_REFS
*p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
@@ -3755,9 +3744,9 @@
return;
}
- data += vpx_rb_bytes_read(&rb);
+ data += aom_rb_bytes_read(&rb);
if (!read_is_valid(data, first_partition_size, data_end))
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt header length");
cm->use_prev_frame_mvs =
@@ -3782,46 +3771,46 @@
}
#endif // CONFIG_EXT_REFS
- vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
if (!cm->fc->initialized)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Uninitialized entropy context.");
- vp10_zero(cm->counts);
+ av1_zero(cm->counts);
xd->corrupted = 0;
new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
if (new_fb->corrupted)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data header is corrupted.");
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- vp10_loop_filter_frame_init(cm, cm->lf.filter_level);
+ av1_loop_filter_frame_init(cm, cm->lf.filter_level);
}
// If encoded in frame parallel mode, the frame context is ready after
// decoding the frame header.
if (cm->frame_parallel_decode &&
cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
- VPxWorker *const worker = pbi->frame_worker_owner;
+ AVxWorker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
context_updated = 1;
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
frame_worker_data->frame_context_ready = 1;
// Signal the main thread that context is ready.
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
}
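The lock/signal/unlock sequence publishes frame_context_ready under the stats lock, then wakes any waiting thread. Reduced to essentials, the consumer side is the classic condition-variable handshake; the sketch below uses hypothetical pthread-based names purely as an illustration of the pattern, not the AVxWorker API.

#include <pthread.h>

/* Illustration only: block until the decode worker publishes the context. */
static void wait_for_frame_context(pthread_mutex_t *lock, pthread_cond_t *cond,
                                   const int *frame_context_ready) {
  pthread_mutex_lock(lock);
  while (!*frame_context_ready)
    pthread_cond_wait(cond, lock);  /* woken by the signal step above */
  pthread_mutex_unlock(lock);
}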
#if CONFIG_ENTROPY
- vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
cm->coef_probs_update_idx = 0;
#endif // CONFIG_ENTROPY
@@ -3836,12 +3825,12 @@
if (!cm->skip_loop_filter) {
// If multiple threads are used to decode tiles, then we use those
// threads to do parallel loopfiltering.
- vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
- cm->lf.filter_level, 0, 0, pbi->tile_workers,
- pbi->num_tile_workers, &pbi->lf_row_sync);
+ av1_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
+ 0, 0, pbi->tile_workers, pbi->num_tile_workers,
+ &pbi->lf_row_sync);
}
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
}
} else {
@@ -3849,10 +3838,10 @@
}
#if CONFIG_LOOP_RESTORATION
if (cm->rst_info.restoration_type != RESTORE_NONE) {
- vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
- cm->frame_type == KEY_FRAME, cm->width,
- cm->height);
- vp10_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
+ av1_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
+ cm->frame_type == KEY_FRAME, cm->width,
+ cm->height);
+ av1_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
}
#endif // CONFIG_LOOP_RESTORATION
@@ -3861,18 +3850,18 @@
#if CONFIG_ENTROPY
cm->partial_prob_update = 0;
#endif // CONFIG_ENTROPY
- vp10_adapt_coef_probs(cm);
- vp10_adapt_intra_frame_probs(cm);
+ av1_adapt_coef_probs(cm);
+ av1_adapt_intra_frame_probs(cm);
if (!frame_is_intra_only(cm)) {
- vp10_adapt_inter_frame_probs(cm);
- vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+ av1_adapt_inter_frame_probs(cm);
+ av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
}
} else {
debug_check_frame_counts(cm);
}
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
}
diff --git a/av1/decoder/decodeframe.h b/av1/decoder/decodeframe.h
index 7fdff0b..020c424 100644
--- a/av1/decoder/decodeframe.h
+++ b/av1/decoder/decodeframe.h
@@ -8,26 +8,26 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DECODEFRAME_H_
-#define VP10_DECODER_DECODEFRAME_H_
+#ifndef AV1_DECODER_DECODEFRAME_H_
+#define AV1_DECODER_DECODEFRAME_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Decoder;
-struct vpx_read_bit_buffer;
+struct AV1Decoder;
+struct aom_read_bit_buffer;
-int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
- int *height);
-BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb);
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb);
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+ int *height);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
-void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
- const uint8_t *data_end, const uint8_t **p_data_end);
+void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODEFRAME_H_
+#endif // AV1_DECODER_DECODEFRAME_H_
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index ef776a0..47cfea6 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -22,27 +22,27 @@
#include "av1/decoder/decodemv.h"
#include "av1/decoder/decodeframe.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
-static INLINE int read_uniform(vp10_reader *r, int n) {
+static INLINE int read_uniform(aom_reader *r, int n) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
- int v = vp10_read_literal(r, l - 1);
+ int v = aom_read_literal(r, l - 1);
assert(l != 0);
if (v < m)
return v;
else
- return (v << 1) - m + vp10_read_literal(r, 1);
+ return (v << 1) - m + aom_read_literal(r, 1);
}
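read_uniform implements a truncated binary code over n symbols: the first m = 2^l - n symbols cost l - 1 bits, the remainder cost l bits. A worked example for n = 5, following the code above:

/* n = 5: l = 3, m = (1 << 3) - 5 = 3.
 * 2-bit prefix v: 00 -> 0, 01 -> 1, 10 -> 2         (v < m, short codes)
 * v = 3 (binary 11): read one extra bit b, so
 *   result = (v << 1) - m + b = 3 + b -> 3 or 4     (3-bit codes 110, 111)
 */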
-static PREDICTION_MODE read_intra_mode(vp10_reader *r, const vpx_prob *p) {
- return (PREDICTION_MODE)vp10_read_tree(r, vp10_intra_mode_tree, p);
+static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
+ return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p);
}
-static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r, int size_group) {
+static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r, int size_group) {
const PREDICTION_MODE y_mode =
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
@@ -50,8 +50,8 @@
return y_mode;
}
-static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r,
+static PREDICTION_MODE read_intra_mode_uv(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r,
PREDICTION_MODE y_mode) {
const PREDICTION_MODE uv_mode =
read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
@@ -61,27 +61,27 @@
}
#if CONFIG_EXT_INTER
-static INTERINTRA_MODE read_interintra_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r, int size_group) {
- const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)vp10_read_tree(
- r, vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
+static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r, int size_group) {
+ const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_tree(
+ r, av1_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->interintra_mode[size_group][ii_mode];
return ii_mode;
}
#endif // CONFIG_EXT_INTER
-static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_inter_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
MB_MODE_INFO *mbmi,
#endif
- vp10_reader *r, int16_t ctx) {
+ aom_reader *r, int16_t ctx) {
#if CONFIG_REF_MV
FRAME_COUNTS *counts = xd->counts;
int16_t mode_ctx = ctx & NEWMV_CTX_MASK;
- vpx_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
+ aom_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->newmv_mode[mode_ctx][0];
#if CONFIG_EXT_INTER
@@ -91,7 +91,7 @@
#if CONFIG_EXT_INTER
} else {
mode_prob = cm->fc->new2mv_prob;
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->new2mv_mode[0];
return NEWMV;
} else {
@@ -108,7 +108,7 @@
mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
mode_prob = cm->fc->zeromv_prob[mode_ctx];
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->zeromv_mode[mode_ctx][0];
return ZEROMV;
}
@@ -122,7 +122,7 @@
mode_prob = cm->fc->refmv_prob[mode_ctx];
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->refmv_mode[mode_ctx][0];
return NEARESTMV;
@@ -135,7 +135,7 @@
assert(0);
#else
const int mode =
- vp10_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+ aom_read_tree(r, av1_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_mode[ctx][mode];
@@ -144,18 +144,18 @@
}
#if CONFIG_REF_MV
-static void read_drl_idx(const VP10_COMMON *cm, MACROBLOCKD *xd,
- MB_MODE_INFO *mbmi, vp10_reader *r) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+ MB_MODE_INFO *mbmi, aom_reader *r) {
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
mbmi->ref_mv_idx = 0;
if (mbmi->mode == NEWMV) {
int idx;
for (idx = 0; idx < 2; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
- uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!vp10_read(r, drl_prob)) {
+ uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ if (!aom_read(r, drl_prob)) {
mbmi->ref_mv_idx = idx;
if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
@@ -173,9 +173,9 @@
// mode is factored in.
for (idx = 1; idx < 3; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
- uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!vp10_read(r, drl_prob)) {
+ uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ if (!aom_read(r, drl_prob)) {
mbmi->ref_mv_idx = idx - 1;
if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
@@ -189,11 +189,10 @@
#endif
#if CONFIG_EXT_INTER
-static PREDICTION_MODE read_inter_compound_mode(VP10_COMMON *cm,
- MACROBLOCKD *xd, vp10_reader *r,
- int16_t ctx) {
- const int mode = vp10_read_tree(r, vp10_inter_compound_mode_tree,
- cm->fc->inter_compound_mode_probs[ctx]);
+static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r, int16_t ctx) {
+ const int mode = aom_read_tree(r, av1_inter_compound_mode_tree,
+ cm->fc->inter_compound_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_compound_mode[ctx][mode];
@@ -203,16 +202,16 @@
}
#endif // CONFIG_EXT_INTER
-static int read_segment_id(vp10_reader *r,
+static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
- return vp10_read_tree(r, vp10_segment_tree, segp->tree_probs);
+ return aom_read_tree(r, av1_segment_tree, segp->tree_probs);
}
#if CONFIG_VAR_TX
-static void read_tx_size_vartx(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, FRAME_COUNTS *counts,
TX_SIZE tx_size, int blk_row, int blk_col,
- vp10_reader *r) {
+ aom_reader *r) {
int is_split = 0;
const int tx_row = blk_row >> 1;
const int tx_col = blk_col >> 1;
@@ -229,7 +228,7 @@
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
- is_split = vp10_read(r, cm->fc->txfm_partition_prob[ctx]);
+ is_split = aom_read(r, cm->fc->txfm_partition_prob[ctx]);
if (is_split) {
BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
@@ -268,18 +267,18 @@
}
#endif
-static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
- int tx_size_cat, vp10_reader *r) {
+static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int tx_size_cat, aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
- int tx_size = vp10_read_tree(r, vp10_tx_size_tree[tx_size_cat],
- cm->fc->tx_size_probs[tx_size_cat][ctx]);
+ int tx_size = aom_read_tree(r, av1_tx_size_tree[tx_size_cat],
+ cm->fc->tx_size_probs[tx_size_cat][ctx]);
if (counts) ++counts->tx_size[tx_size_cat][ctx][tx_size];
return (TX_SIZE)tx_size;
}
-static TX_SIZE read_tx_size_intra(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r) {
+static TX_SIZE read_tx_size_intra(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
@@ -297,8 +296,8 @@
}
}
-static TX_SIZE read_tx_size_inter(VP10_COMMON *cm, MACROBLOCKD *xd,
- int allow_select, vp10_reader *r) {
+static TX_SIZE read_tx_size_inter(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int allow_select, aom_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
@@ -328,20 +327,20 @@
}
}
-static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
+static int dec_get_segment_id(const AV1_COMMON *cm, const uint8_t *segment_ids,
int mi_offset, int x_mis, int y_mis) {
int x, y, segment_id = INT_MAX;
for (y = 0; y < y_mis; y++)
for (x = 0; x < x_mis; x++)
segment_id =
- VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+ AOMMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
return segment_id;
}
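Taking the minimum over the covered mi units keeps the block-level id deterministic when a block straddles units holding different ids; for example:

/* A block whose mi units store segment ids {3, 1, 2, 2} resolves to
 * dec_get_segment_id(...) == 1, so id-keyed features such as SEG_LVL_SKIP
 * apply uniformly across the whole block. */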
-static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+static void set_segment_id(AV1_COMMON *cm, int mi_offset, int x_mis, int y_mis,
int segment_id) {
int x, y;
@@ -352,9 +351,9 @@
cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
}
-static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_intra_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
int mi_offset, int x_mis, int y_mis,
- vp10_reader *r) {
+ aom_reader *r) {
struct segmentation *const seg = &cm->seg;
FRAME_COUNTS *counts = xd->counts;
struct segmentation_probs *const segp = &cm->fc->seg;
@@ -370,7 +369,7 @@
return segment_id;
}
-static void copy_segment_id(const VP10_COMMON *cm,
+static void copy_segment_id(const AV1_COMMON *cm,
const uint8_t *last_segment_ids,
uint8_t *current_segment_ids, int mi_offset,
int x_mis, int y_mis) {
@@ -383,8 +382,8 @@
: 0;
}
-static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- int mi_row, int mi_col, vp10_reader *r) {
+static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ int mi_row, int mi_col, aom_reader *r) {
struct segmentation *const seg = &cm->seg;
FRAME_COUNTS *counts = xd->counts;
struct segmentation_probs *const segp = &cm->fc->seg;
@@ -395,8 +394,8 @@
const int bh = num_8x8_blocks_high_lookup[mbmi->sb_type];
// TODO(slavarnway): move x_mis, y_mis into xd ?????
- const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
- const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
+ const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
+ const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
if (!seg->enabled) return 0; // Default for disabled segmentation
@@ -412,9 +411,9 @@
}
if (seg->temporal_update) {
- const int ctx = vp10_get_pred_context_seg_id(xd);
- const vpx_prob pred_prob = segp->pred_probs[ctx];
- mbmi->seg_id_predicted = vp10_read(r, pred_prob);
+ const int ctx = av1_get_pred_context_seg_id(xd);
+ const aom_prob pred_prob = segp->pred_probs[ctx];
+ mbmi->seg_id_predicted = aom_read(r, pred_prob);
if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
if (mbmi->seg_id_predicted) {
segment_id = predicted_segment_id;
@@ -430,21 +429,21 @@
return segment_id;
}
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
- vp10_reader *r) {
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
- const int ctx = vp10_get_skip_context(xd);
- const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
+ const int ctx = av1_get_skip_context(xd);
+ const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
return skip;
}
}
-static void read_palette_mode_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r) {
+static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
@@ -458,16 +457,16 @@
palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
if (left_mi)
palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- if (vp10_read(
+ if (aom_read(
r,
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
pmi->palette_size[0] =
- vp10_read_tree(r, vp10_palette_size_tree,
- vp10_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
+ aom_read_tree(r, av1_palette_size_tree,
+ av1_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
2;
n = pmi->palette_size[0];
for (i = 0; i < n; ++i)
- pmi->palette_colors[i] = vp10_read_literal(r, cm->bit_depth);
+ pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth);
xd->plane[0].color_index_map[0] = read_uniform(r, n);
assert(xd->plane[0].color_index_map[0] < n);
@@ -475,18 +474,18 @@
}
if (mbmi->uv_mode == DC_PRED) {
- if (vp10_read(
- r, vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
+ if (aom_read(r,
+ av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
pmi->palette_size[1] =
- vp10_read_tree(r, vp10_palette_size_tree,
- vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
+ aom_read_tree(r, av1_palette_size_tree,
+ av1_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
2;
n = pmi->palette_size[1];
for (i = 0; i < n; ++i) {
pmi->palette_colors[PALETTE_MAX_SIZE + i] =
- vp10_read_literal(r, cm->bit_depth);
+ aom_read_literal(r, cm->bit_depth);
pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] =
- vp10_read_literal(r, cm->bit_depth);
+ aom_read_literal(r, cm->bit_depth);
}
xd->plane[1].color_index_map[0] = read_uniform(r, n);
assert(xd->plane[1].color_index_map[0] < n);
@@ -495,8 +494,8 @@
}
#if CONFIG_EXT_INTRA
-static void read_ext_intra_mode_info(VP10_COMMON *const cm,
- MACROBLOCKD *const xd, vp10_reader *r) {
+static void read_ext_intra_mode_info(AV1_COMMON *const cm,
+ MACROBLOCKD *const xd, aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
FRAME_COUNTS *counts = xd->counts;
@@ -506,7 +505,7 @@
#endif
if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] =
- vp10_read(r, cm->fc->ext_intra_probs[0]);
+ aom_read(r, cm->fc->ext_intra_probs[0]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
mbmi->ext_intra_mode_info.ext_intra_mode[0] =
read_uniform(r, FILTER_INTRA_MODES);
@@ -517,7 +516,7 @@
if (mbmi->uv_mode == DC_PRED &&
mbmi->palette_mode_info.palette_size[1] == 0) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
- vp10_read(r, cm->fc->ext_intra_probs[1]);
+ aom_read(r, cm->fc->ext_intra_probs[1]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
mbmi->ext_intra_mode_info.ext_intra_mode[1] =
read_uniform(r, FILTER_INTRA_MODES);
@@ -527,11 +526,11 @@
}
}
-static void read_intra_angle_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r) {
+static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ aom_reader *r) {
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- const int ctx = vp10_get_pred_context_intra_interp(xd);
+ const int ctx = av1_get_pred_context_intra_interp(xd);
int p_angle;
if (bsize < BLOCK_8X8) return;
@@ -540,10 +539,10 @@
mbmi->angle_delta[0] =
read_uniform(r, 2 * MAX_ANGLE_DELTAS + 1) - MAX_ANGLE_DELTAS;
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle)) {
+ if (av1_is_intra_filter_switchable(p_angle)) {
FRAME_COUNTS *counts = xd->counts;
- mbmi->intra_filter = vp10_read_tree(r, vp10_intra_filter_tree,
- cm->fc->intra_filter_probs[ctx]);
+ mbmi->intra_filter = aom_read_tree(r, av1_intra_filter_tree,
+ cm->fc->intra_filter_probs[ctx]);
if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter];
} else {
mbmi->intra_filter = INTRA_FILTER_LINEAR;
@@ -557,9 +556,9 @@
}
#endif // CONFIG_EXT_INTRA
-static void read_intra_frame_mode_info(VP10_COMMON *const cm,
+static void read_intra_frame_mode_info(AV1_COMMON *const cm,
MACROBLOCKD *const xd, int mi_row,
- int mi_col, vp10_reader *r) {
+ int mi_col, aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *above_mi = xd->above_mi;
@@ -571,8 +570,8 @@
const int bh = xd->plane[0].n4_h >> 1;
// TODO(slavarnway): move x_mis, y_mis into xd ?????
- const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
- const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
+ const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
+ const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
mbmi->segment_id = read_intra_segment_id(cm, xd, mi_offset, x_mis, y_mis, r);
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
@@ -627,8 +626,8 @@
FRAME_COUNTS *counts = xd->counts;
int eset = get_ext_tx_set(mbmi->tx_size, mbmi->sb_type, 0);
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_intra_tree[eset],
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
if (counts)
++counts
@@ -643,8 +642,8 @@
FRAME_COUNTS *counts = xd->counts;
TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
mbmi->tx_type =
- vp10_read_tree(r, vp10_ext_tx_tree,
- cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+ aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
} else {
@@ -654,64 +653,63 @@
}
}
-static int read_mv_component(vp10_reader *r, const nmv_component *mvcomp,
+static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
int usehp) {
int mag, d, fr, hp;
- const int sign = vp10_read(r, mvcomp->sign);
- const int mv_class = vp10_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
+ const int sign = aom_read(r, mvcomp->sign);
+ const int mv_class = aom_read_tree(r, av1_mv_class_tree, mvcomp->classes);
const int class0 = mv_class == MV_CLASS_0;
// Integer part
if (class0) {
- d = vp10_read_tree(r, vp10_mv_class0_tree, mvcomp->class0);
+ d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0);
mag = 0;
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
d = 0;
- for (i = 0; i < n; ++i) d |= vp10_read(r, mvcomp->bits[i]) << i;
+ for (i = 0; i < n; ++i) d |= aom_read(r, mvcomp->bits[i]) << i;
mag = CLASS0_SIZE << (mv_class + 2);
}
// Fractional part
- fr = vp10_read_tree(r, vp10_mv_fp_tree,
- class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+ fr = aom_read_tree(r, av1_mv_fp_tree,
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
// High precision part (if hp is not used, the default value of hp is 1)
- hp = usehp ? vp10_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
+ hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
return sign ? -mag : mag;
}
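A worked decode of one component in 1/8-pel units, assuming the usual CLASS0_BITS = 1 (hence CLASS0_SIZE = 2) from entropymv.h:

/* sign = 1, mv_class = MV_CLASS_1:
 *   mag = CLASS0_SIZE << (1 + 2) = 16
 *   integer part: n = 1 + CLASS0_BITS - 1 = 1 bit, say d = 1
 *   fr = 2, hp = 1:
 *   mag += ((1 << 3) | (2 << 1) | 1) + 1 = 16 + 14 = 30
 * returned value: -30 (the sign is applied last). */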
-static INLINE void read_mv(vp10_reader *r, MV *mv, const MV *ref,
+static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
MV_JOINT_TYPE joint_type;
- const int use_hp = allow_hp && vp10_use_mv_hp(ref);
+ const int use_hp = allow_hp && av1_use_mv_hp(ref);
MV diff = { 0, 0 };
#if CONFIG_REF_MV && !CONFIG_EXT_INTER
if (is_compound) {
- int is_zero_rmv = vp10_read(r, ctx->zero_rmv);
+ int is_zero_rmv = aom_read(r, ctx->zero_rmv);
if (is_zero_rmv) {
joint_type = MV_JOINT_ZERO;
} else {
joint_type =
- (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+ (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
}
} else {
joint_type =
- (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+ (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
}
#else
- joint_type =
- (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+ joint_type = (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
#endif
#if CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -724,19 +722,19 @@
if (mv_joint_horizontal(joint_type))
diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
- vp10_inc_mv(&diff, counts, use_hp);
+ av1_inc_mv(&diff, counts, use_hp);
mv->row = ref->row + diff.row;
mv->col = ref->col + diff.col;
}
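The joint type decides which components are coded at all; only nonzero components consume bits. Enumerated from the mv_joint_* predicates used above, with a small example:

/* MV_JOINT_ZERO   -> neither diff.row nor diff.col read (diff = {0, 0})
 * MV_JOINT_HNZVZ  -> col only
 * MV_JOINT_HZVNZ  -> row only
 * MV_JOINT_HNZVNZ -> both
 * e.g. ref = {8, -4}, joint = MV_JOINT_HZVNZ, decoded row diff = 6
 *   -> mv = {14, -4}. */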
-static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
+static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
const MACROBLOCKD *xd,
- vp10_reader *r) {
+ aom_reader *r) {
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- const int ctx = vp10_get_reference_mode_context(cm, xd);
+ const int ctx = av1_get_reference_mode_context(cm, xd);
const REFERENCE_MODE mode =
- (REFERENCE_MODE)vp10_read(r, cm->fc->comp_inter_prob[ctx]);
+ (REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
@@ -746,8 +744,8 @@
}
// Read the reference frame
-static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r, int segment_id,
+static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ aom_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = xd->counts;
@@ -765,29 +763,29 @@
#else
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
#endif // CONFIG_EXT_REFS
- const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
- const int bit = vp10_read(r, fc->comp_ref_prob[ctx][0]);
+ const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
+ const int bit = aom_read(r, fc->comp_ref_prob[ctx][0]);
if (counts) ++counts->comp_ref[ctx][0][bit];
#if CONFIG_EXT_REFS
// Decode forward references.
if (!bit) {
- const int ctx1 = vp10_get_pred_context_comp_ref_p1(cm, xd);
- const int bit1 = vp10_read(r, fc->comp_ref_prob[ctx1][1]);
+ const int ctx1 = av1_get_pred_context_comp_ref_p1(cm, xd);
+ const int bit1 = aom_read(r, fc->comp_ref_prob[ctx1][1]);
if (counts) ++counts->comp_ref[ctx1][1][bit1];
ref_frame[!idx] = cm->comp_fwd_ref[bit1 ? 0 : 1];
} else {
- const int ctx2 = vp10_get_pred_context_comp_ref_p2(cm, xd);
- const int bit2 = vp10_read(r, fc->comp_ref_prob[ctx2][2]);
+ const int ctx2 = av1_get_pred_context_comp_ref_p2(cm, xd);
+ const int bit2 = aom_read(r, fc->comp_ref_prob[ctx2][2]);
if (counts) ++counts->comp_ref[ctx2][2][bit2];
ref_frame[!idx] = cm->comp_fwd_ref[bit2 ? 3 : 2];
}
// Decode backward references.
{
- const int ctx_bwd = vp10_get_pred_context_comp_bwdref_p(cm, xd);
- const int bit_bwd = vp10_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
+ const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
+ const int bit_bwd = aom_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
}
@@ -797,39 +795,39 @@
#endif // CONFIG_EXT_REFS
} else if (mode == SINGLE_REFERENCE) {
#if CONFIG_EXT_REFS
- const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
- const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
+ const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
+ const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
- const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
- const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
+ const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
+ const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
} else {
- const int ctx2 = vp10_get_pred_context_single_ref_p3(xd);
- const int bit2 = vp10_read(r, fc->single_ref_prob[ctx2][2]);
+ const int ctx2 = av1_get_pred_context_single_ref_p3(xd);
+ const int bit2 = aom_read(r, fc->single_ref_prob[ctx2][2]);
if (counts) ++counts->single_ref[ctx2][2][bit2];
if (bit2) {
- const int ctx4 = vp10_get_pred_context_single_ref_p5(xd);
- const int bit4 = vp10_read(r, fc->single_ref_prob[ctx4][4]);
+ const int ctx4 = av1_get_pred_context_single_ref_p5(xd);
+ const int bit4 = aom_read(r, fc->single_ref_prob[ctx4][4]);
if (counts) ++counts->single_ref[ctx4][4][bit4];
ref_frame[0] = bit4 ? GOLDEN_FRAME : LAST3_FRAME;
} else {
- const int ctx3 = vp10_get_pred_context_single_ref_p4(xd);
- const int bit3 = vp10_read(r, fc->single_ref_prob[ctx3][3]);
+ const int ctx3 = av1_get_pred_context_single_ref_p4(xd);
+ const int bit3 = aom_read(r, fc->single_ref_prob[ctx3][3]);
if (counts) ++counts->single_ref[ctx3][3][bit3];
ref_frame[0] = bit3 ? LAST2_FRAME : LAST_FRAME;
}
}
#else
- const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
- const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
+ const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
+ const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
- const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
- const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
+ const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
+ const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
@@ -845,16 +843,16 @@
}
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static MOTION_VARIATION read_motvar_block(VP10_COMMON *const cm,
+static MOTION_VARIATION read_motvar_block(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
- vp10_reader *r) {
+ aom_reader *r) {
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
FRAME_COUNTS *counts = xd->counts;
MOTION_VARIATION motvar;
if (is_motvar_allowed(&xd->mi[0]->mbmi)) {
- motvar = (MOTION_VARIATION)vp10_read_tree(r, vp10_motvar_tree,
- cm->fc->motvar_prob[bsize]);
+ motvar = (MOTION_VARIATION)aom_read_tree(r, av1_motvar_tree,
+ cm->fc->motvar_prob[bsize]);
if (counts) ++counts->motvar[bsize][motvar];
return motvar;
} else {
@@ -863,34 +861,34 @@
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
-static INLINE INTERP_FILTER read_interp_filter(VP10_COMMON *const cm,
+static INLINE INTERP_FILTER read_interp_filter(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
#if CONFIG_DUAL_FILTER
int dir,
#endif
- vp10_reader *r) {
+ aom_reader *r) {
#if CONFIG_EXT_INTERP
- if (!vp10_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
+ if (!av1_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
#endif
if (cm->interp_filter != SWITCHABLE) {
return cm->interp_filter;
} else {
#if CONFIG_DUAL_FILTER
- const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
#else
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
#endif
FRAME_COUNTS *counts = xd->counts;
- const INTERP_FILTER type = (INTERP_FILTER)vp10_read_tree(
- r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+ const INTERP_FILTER type = (INTERP_FILTER)aom_read_tree(
+ r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
if (counts) ++counts->switchable_interp[ctx][type];
return type;
}
}
-static void read_intra_block_mode_info(VP10_COMMON *const cm,
+static void read_intra_block_mode_info(AV1_COMMON *const cm,
MACROBLOCKD *const xd, MODE_INFO *mi,
- vp10_reader *r) {
+ aom_reader *r) {
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
int i;
@@ -938,14 +936,14 @@
mv->col < MV_UPP;
}
-static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
PREDICTION_MODE mode,
#if CONFIG_REF_MV
int block,
#endif
int_mv mv[2], int_mv ref_mv[2],
int_mv nearest_mv[2], int_mv near_mv[2],
- int is_compound, int allow_hp, vp10_reader *r) {
+ int is_compound, int allow_hp, aom_reader *r) {
int i;
int ret = 1;
#if CONFIG_REF_MV
@@ -966,8 +964,8 @@
#endif
for (i = 0; i < 1 + is_compound; ++i) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
- xd->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+ xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
@@ -1026,8 +1024,8 @@
assert(is_compound);
for (i = 0; i < 2; ++i) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
- xd->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+ xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, is_compound,
@@ -1067,8 +1065,8 @@
case NEW_NEARESTMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
- xd->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+ xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
@@ -1086,8 +1084,8 @@
case NEAREST_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
- xd->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+ xd->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = nearest_mv[0].as_int;
@@ -1106,8 +1104,8 @@
case NEAR_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
- xd->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+ xd->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = near_mv[0].as_int;
@@ -1127,8 +1125,8 @@
case NEW_NEARMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
- xd->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+ xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
@@ -1155,13 +1153,13 @@
return ret;
}
-static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- int segment_id, vp10_reader *r) {
+static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ int segment_id, aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
} else {
- const int ctx = vp10_get_intra_inter_context(xd);
- const int is_inter = vp10_read(r, cm->fc->intra_inter_prob[ctx]);
+ const int ctx = av1_get_intra_inter_context(xd);
+ const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
@@ -1169,21 +1167,21 @@
}
static void fpm_sync(void *const data, int mi_row) {
- VP10Decoder *const pbi = (VP10Decoder *)data;
- vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
- mi_row << pbi->common.mib_size_log2);
+ AV1Decoder *const pbi = (AV1Decoder *)data;
+ av1_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
+ mi_row << pbi->common.mib_size_log2);
}
-static void read_inter_block_mode_info(VP10Decoder *const pbi,
+static void read_inter_block_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi,
#if (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r,
+ int mi_row, int mi_col, aom_reader *r,
int supertx_enabled) {
#else
- int mi_row, int mi_col, vp10_reader *r) {
+ int mi_row, int mi_col, aom_reader *r) {
#endif // (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
@@ -1211,22 +1209,22 @@
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
- if ((!vp10_is_valid_scale(&ref_buf->sf)))
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ if ((!av1_is_valid_scale(&ref_buf->sf)))
+ aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
- vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
+ av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
}
for (ref_frame = LAST_FRAME; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
- vp10_find_mv_refs(cm, xd, mi, ref_frame,
+ av1_find_mv_refs(cm, xd, mi, ref_frame,
#if CONFIG_REF_MV
- &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
+ &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
#if CONFIG_EXT_INTER
- compound_inter_mode_ctx,
+ compound_inter_mode_ctx,
#endif // CONFIG_EXT_INTER
#endif
- ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
- inter_mode_ctx);
+ ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
+ inter_mode_ctx);
}
#if CONFIG_REF_MV
@@ -1236,7 +1234,7 @@
else
#endif // CONFIG_EXT_INTER
mode_ctx =
- vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
+ av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
mbmi->ref_mv_idx = 0;
#else
mode_ctx = inter_mode_ctx[mbmi->ref_frame[0]];
@@ -1245,7 +1243,7 @@
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid usage of segement feature on small blocks");
return;
}
@@ -1275,8 +1273,8 @@
if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref) {
- vp10_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
- &nearestmv[ref], &nearmv[ref]);
+ av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
+ &nearestmv[ref], &nearmv[ref]);
}
}
@@ -1293,7 +1291,7 @@
if (is_compound && bsize >= BLOCK_8X8 && mbmi->mode != NEWMV &&
mbmi->mode != ZEROMV) {
#endif // CONFIG_EXT_INTER
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
#if CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 0) {
@@ -1366,8 +1364,8 @@
#if CONFIG_EXT_INTER
if (!is_compound)
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
- bsize, j);
+ mode_ctx = av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
+ bsize, j);
#endif
#if CONFIG_EXT_INTER
if (is_compound)
@@ -1395,24 +1393,24 @@
#if CONFIG_EXT_INTER
{
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
- vp10_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
- mi_row, mi_col, NULL);
+ av1_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
+ mi_row, mi_col, NULL);
#endif // CONFIG_EXT_INTER
- vp10_append_sub8x8_mvs_for_idx(
- cm, xd, j, ref, mi_row, mi_col,
+ av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
#if CONFIG_REF_MV
- ref_mv_stack[ref], &ref_mv_count[ref],
+ ref_mv_stack[ref], &ref_mv_count[ref],
#endif
#if CONFIG_EXT_INTER
- mv_ref_list,
+ mv_ref_list,
#endif // CONFIG_EXT_INTER
- &nearest_sub8x8[ref], &near_sub8x8[ref]);
+ &nearest_sub8x8[ref],
+ &near_sub8x8[ref]);
#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(b_mode)) {
mv_ref_list[0].as_int = nearest_sub8x8[ref].as_int;
mv_ref_list[1].as_int = near_sub8x8[ref].as_int;
- vp10_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
- &ref_mv[1][ref]);
+ av1_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
+ &ref_mv[1][ref]);
}
}
#endif // CONFIG_EXT_INTER
@@ -1469,7 +1467,7 @@
for (ref = 0; ref < 1 + is_compound && mbmi->mode == NEWMV; ++ref) {
#if CONFIG_REF_MV
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
if (xd->ref_mv_count[ref_frame_type] > 1) {
ref_mv[ref] =
(ref == 0)
@@ -1503,7 +1501,7 @@
#endif
is_interintra_allowed(mbmi)) {
const int bsize_group = size_group_lookup[bsize];
- const int interintra = vp10_read(r, cm->fc->interintra_prob[bsize_group]);
+ const int interintra = aom_read(r, cm->fc->interintra_prob[bsize_group]);
if (xd->counts) xd->counts->interintra[bsize_group][interintra]++;
assert(mbmi->ref_frame[1] == NONE);
if (interintra) {
@@ -1520,12 +1518,12 @@
#endif // CONFIG_EXT_INTRA
if (is_interintra_wedge_used(bsize)) {
mbmi->use_wedge_interintra =
- vp10_read(r, cm->fc->wedge_interintra_prob[bsize]);
+ aom_read(r, cm->fc->wedge_interintra_prob[bsize]);
if (xd->counts)
xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
mbmi->interintra_wedge_index =
- vp10_read_literal(r, get_wedge_bits_lookup(bsize));
+ aom_read_literal(r, get_wedge_bits_lookup(bsize));
mbmi->interintra_wedge_sign = 0;
}
}
@@ -1554,13 +1552,13 @@
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
is_interinter_wedge_used(bsize)) {
mbmi->use_wedge_interinter =
- vp10_read(r, cm->fc->wedge_interinter_prob[bsize]);
+ aom_read(r, cm->fc->wedge_interinter_prob[bsize]);
if (xd->counts)
xd->counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
if (mbmi->use_wedge_interinter) {
mbmi->interinter_wedge_index =
- vp10_read_literal(r, get_wedge_bits_lookup(bsize));
- mbmi->interinter_wedge_sign = vp10_read_bit(r);
+ aom_read_literal(r, get_wedge_bits_lookup(bsize));
+ mbmi->interinter_wedge_sign = aom_read_bit(r);
}
}
#endif // CONFIG_EXT_INTER
@@ -1588,13 +1586,13 @@
#endif // CONFIG_DUAL_FILTER
}
-static void read_inter_frame_mode_info(VP10Decoder *const pbi,
+static void read_inter_frame_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r) {
- VP10_COMMON *const cm = &pbi->common;
+ int mi_row, int mi_col, aom_reader *r) {
+ AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block = 1;
@@ -1693,8 +1691,8 @@
if (inter_block) {
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_inter_tree[eset],
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_inter_tree[eset],
cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]]);
if (counts)
++counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
@@ -1702,8 +1700,8 @@
}
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_intra_tree[eset],
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
if (counts)
++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
@@ -1721,13 +1719,13 @@
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
+ mbmi->tx_type = aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
} else {
const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_tree,
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -1739,13 +1737,13 @@
}
}
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERTX
- int supertx_enabled,
+ int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r, int x_mis,
- int y_mis) {
- VP10_COMMON *const cm = &pbi->common;
+ int mi_row, int mi_col, aom_reader *r, int x_mis,
+ int y_mis) {
+ AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
diff --git a/av1/decoder/decodemv.h b/av1/decoder/decodemv.h
index 59fdd70..cf3d917 100644
--- a/av1/decoder/decodemv.h
+++ b/av1/decoder/decodemv.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DECODEMV_H_
-#define VP10_DECODER_DECODEMV_H_
+#ifndef AV1_DECODER_DECODEMV_H_
+#define AV1_DECODER_DECODEMV_H_
#include "av1/decoder/bitreader.h"
@@ -19,16 +19,16 @@
extern "C" {
#endif
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERTX
- int supertx_enabled,
+ int supertx_enabled,
#endif
- int mi_row, int mi_col, vp10_reader *r, int x_mis,
- int y_mis);
+ int mi_row, int mi_col, aom_reader *r, int x_mis,
+ int y_mis);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODEMV_H_
+#endif // AV1_DECODER_DECODEMV_H_
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index 4cea36b..58952c0 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -12,16 +12,16 @@
#include <limits.h>
#include <stdio.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/system_state.h"
-#include "aom_ports/vpx_once.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_ports/aom_once.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/loopfilter.h"
@@ -38,60 +38,60 @@
static volatile int init_done = 0;
if (!init_done) {
- vp10_rtcd();
- vpx_dsp_rtcd();
- vpx_scale_rtcd();
- vp10_init_intra_predictors();
+ av1_rtcd();
+ aom_dsp_rtcd();
+ aom_scale_rtcd();
+ av1_init_intra_predictors();
#if CONFIG_EXT_INTER
- vp10_init_wedge_masks();
+ av1_init_wedge_masks();
#endif // CONFIG_EXT_INTER
init_done = 1;
}
}
-static void vp10_dec_setup_mi(VP10_COMMON *cm) {
+static void av1_dec_setup_mi(AV1_COMMON *cm) {
cm->mi = cm->mip + cm->mi_stride + 1;
cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
memset(cm->mi_grid_base, 0,
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
-static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
- cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+static int av1_dec_alloc_mi(AV1_COMMON *cm, int mi_size) {
+ cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ cm->mi_grid_base = (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->mi_grid_base) return 1;
return 0;
}
-static void vp10_dec_free_mi(VP10_COMMON *cm) {
- vpx_free(cm->mip);
+static void av1_dec_free_mi(AV1_COMMON *cm) {
+ aom_free(cm->mip);
cm->mip = NULL;
- vpx_free(cm->mi_grid_base);
+ aom_free(cm->mi_grid_base);
cm->mi_grid_base = NULL;
}
-VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
- VP10Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
- VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
+AV1Decoder *av1_decoder_create(BufferPool *const pool) {
+ AV1Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
+ AV1_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
if (!cm) return NULL;
- vp10_zero(*pbi);
+ av1_zero(*pbi);
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
- vp10_decoder_remove(pbi);
+ av1_decoder_remove(pbi);
return NULL;
}
cm->error.setjmp = 1;
- CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(
cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+ (FRAME_CONTEXT *)aom_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
pbi->need_resync = 1;
once(initialize_dec);
@@ -104,50 +104,50 @@
pbi->ready_for_new_data = 1;
pbi->common.buffer_pool = pool;
- cm->bit_depth = VPX_BITS_8;
- cm->dequant_bit_depth = VPX_BITS_8;
+ cm->bit_depth = AOM_BITS_8;
+ cm->dequant_bit_depth = AOM_BITS_8;
- cm->alloc_mi = vp10_dec_alloc_mi;
- cm->free_mi = vp10_dec_free_mi;
- cm->setup_mi = vp10_dec_setup_mi;
+ cm->alloc_mi = av1_dec_alloc_mi;
+ cm->free_mi = av1_dec_free_mi;
+ cm->setup_mi = av1_dec_setup_mi;
- vp10_loop_filter_init(cm);
+ av1_loop_filter_init(cm);
#if CONFIG_AOM_QM
aom_qm_init(cm);
#endif
#if CONFIG_LOOP_RESTORATION
- vp10_loop_restoration_precal();
+ av1_loop_restoration_precal();
#endif // CONFIG_LOOP_RESTORATION
cm->error.setjmp = 0;
- vpx_get_worker_interface()->init(&pbi->lf_worker);
+ aom_get_worker_interface()->init(&pbi->lf_worker);
return pbi;
}
-void vp10_decoder_remove(VP10Decoder *pbi) {
+void av1_decoder_remove(AV1Decoder *pbi) {
int i;
if (!pbi) return;
- vpx_get_worker_interface()->end(&pbi->lf_worker);
- vpx_free(pbi->lf_worker.data1);
- vpx_free(pbi->tile_data);
+ aom_get_worker_interface()->end(&pbi->lf_worker);
+ aom_free(pbi->lf_worker.data1);
+ aom_free(pbi->tile_data);
for (i = 0; i < pbi->num_tile_workers; ++i) {
- VPxWorker *const worker = &pbi->tile_workers[i];
- vpx_get_worker_interface()->end(worker);
+ AVxWorker *const worker = &pbi->tile_workers[i];
+ aom_get_worker_interface()->end(worker);
}
- vpx_free(pbi->tile_worker_data);
- vpx_free(pbi->tile_worker_info);
- vpx_free(pbi->tile_workers);
+ aom_free(pbi->tile_worker_data);
+ aom_free(pbi->tile_worker_info);
+ aom_free(pbi->tile_workers);
if (pbi->num_tile_workers > 0) {
- vp10_loop_filter_dealloc(&pbi->lf_row_sync);
+ av1_loop_filter_dealloc(&pbi->lf_row_sync);
}
- vpx_free(pbi);
+ aom_free(pbi);
}
static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
@@ -156,45 +156,45 @@
a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
-vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
- VP10_COMMON *cm = &pbi->common;
+aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi,
+ AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ AV1_COMMON *cm = &pbi->common;
/* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
* encoder is using the frame buffers for. This is just a stub to keep the
- * vpxenc --test-decode functionality working, and will be replaced in a
- * later commit that adds VP9-specific controls for this functionality.
+ * aomenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds AV1-specific controls for this functionality.
*/
- if (ref_frame_flag == VPX_LAST_FLAG) {
+ if (ref_frame_flag == AOM_LAST_FLAG) {
const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
if (cfg == NULL) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"No 'last' reference frame");
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
if (!equal_dimensions(cfg, sd))
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Incorrect buffer dimensions");
else
- vpx_yv12_copy_frame(cfg, sd);
+ aom_yv12_copy_frame(cfg, sd);
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
}
return cm->error.error_code;
}
-vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
+ AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
int idx;
YV12_BUFFER_CONFIG *ref_buf = NULL;
// TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
// encoder is using the frame buffers for. This is just a stub to keep the
- // vpxenc --test-decode functionality working, and will be replaced in a
- // later commit that adds VP9-specific controls for this functionality.
+ // aomenc --test-decode functionality working, and will be replaced in a
+ // later commit that adds AV1-specific controls for this functionality.
// (Yunqing) The set_reference control depends on the following setting in
// encoder.
@@ -212,32 +212,32 @@
// TODO(zoeliu): To revisit following code and reconsider what assumption we
// may take on the reference frame buffer virtual indexes
- if (ref_frame_flag == VPX_LAST_FLAG) {
+ if (ref_frame_flag == AOM_LAST_FLAG) {
idx = cm->ref_frame_map[0];
#if CONFIG_EXT_REFS
- } else if (ref_frame_flag == VPX_LAST2_FLAG) {
+ } else if (ref_frame_flag == AOM_LAST2_FLAG) {
idx = cm->ref_frame_map[1];
- } else if (ref_frame_flag == VPX_LAST3_FLAG) {
+ } else if (ref_frame_flag == AOM_LAST3_FLAG) {
idx = cm->ref_frame_map[2];
- } else if (ref_frame_flag == VPX_GOLD_FLAG) {
+ } else if (ref_frame_flag == AOM_GOLD_FLAG) {
idx = cm->ref_frame_map[3];
- } else if (ref_frame_flag == VPX_BWD_FLAG) {
+ } else if (ref_frame_flag == AOM_BWD_FLAG) {
idx = cm->ref_frame_map[4];
- } else if (ref_frame_flag == VPX_ALT_FLAG) {
+ } else if (ref_frame_flag == AOM_ALT_FLAG) {
idx = cm->ref_frame_map[5];
#else
- } else if (ref_frame_flag == VPX_GOLD_FLAG) {
+ } else if (ref_frame_flag == AOM_GOLD_FLAG) {
idx = cm->ref_frame_map[1];
- } else if (ref_frame_flag == VPX_ALT_FLAG) {
+ } else if (ref_frame_flag == AOM_ALT_FLAG) {
idx = cm->ref_frame_map[2];
#endif // CONFIG_EXT_REFS
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
return cm->error.error_code;
}
if (idx < 0 || idx >= FRAME_BUFFERS) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Invalid reference frame map");
return cm->error.error_code;
}
@@ -246,20 +246,20 @@
ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;
if (!equal_dimensions(ref_buf, sd)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Incorrect buffer dimensions");
} else {
// Overwrite the reference frame buffer.
- vpx_yv12_copy_frame(sd, ref_buf);
+ aom_yv12_copy_frame(sd, ref_buf);
}
return cm->error.error_code;
}
/* If any buffer updating is signaled it should be done here. */
-static void swap_frame_buffers(VP10Decoder *pbi) {
+static void swap_frame_buffers(AV1Decoder *pbi) {
int ref_index = 0, mask;
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
@@ -302,14 +302,14 @@
}
}
-int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
- const uint8_t **psource) {
- VP10_COMMON *volatile const cm = &pbi->common;
+int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
+ const uint8_t **psource) {
+ AV1_COMMON *volatile const cm = &pbi->common;
BufferPool *volatile const pool = cm->buffer_pool;
RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
const uint8_t *source = *psource;
int retcode = 0;
- cm->error.error_code = VPX_CODEC_OK;
+ cm->error.error_code = AOM_CODEC_OK;
if (size == 0) {
// This is used to signal that we are missing frames.
@@ -340,27 +340,27 @@
// Find a free frame buffer. Return an error if none can be found.
cm->new_fb_idx = get_free_fb(cm);
- if (cm->new_fb_idx == INVALID_IDX) return VPX_CODEC_MEM_ERROR;
+ if (cm->new_fb_idx == INVALID_IDX) return AOM_CODEC_MEM_ERROR;
// Assign a MV array to the frame buffer.
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
pbi->hold_ref_buf = 0;
if (cm->frame_parallel_decode) {
- VPxWorker *const worker = pbi->frame_worker_owner;
- vp10_frameworker_lock_stats(worker);
+ AVxWorker *const worker = pbi->frame_worker_owner;
+ av1_frameworker_lock_stats(worker);
frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
// Reset decoding progress.
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_unlock_stats(worker);
} else {
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
}
if (setjmp(cm->error.jmp)) {
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
int i;
cm->error.setjmp = 0;
@@ -399,12 +399,12 @@
decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
unlock_buffer_pool(pool);
- vpx_clear_system_state();
+ aom_clear_system_state();
return -1;
}
cm->error.setjmp = 1;
- vp10_decode_frame(pbi, source, source + size, psource);
+ av1_decode_frame(pbi, source, source + size, psource);
swap_frame_buffers(pbi);
@@ -414,9 +414,9 @@
// border.
if (pbi->dec_tile_row == -1 && pbi->dec_tile_col == -1)
#endif // CONFIG_EXT_TILE
- vpx_extend_frame_inner_borders(cm->frame_to_show);
+ aom_extend_frame_inner_borders(cm->frame_to_show);
- vpx_clear_system_state();
+ aom_clear_system_state();
if (!cm->show_existing_frame) {
cm->last_show_frame = cm->show_frame;
@@ -428,24 +428,24 @@
cm->prev_frame = cm->cur_frame;
if (cm->seg.enabled && !cm->frame_parallel_decode)
- vp10_swap_current_and_last_seg_map(cm);
+ av1_swap_current_and_last_seg_map(cm);
}
// Update progress in frame parallel decode.
if (cm->frame_parallel_decode) {
// Need to lock the mutex here as another thread may
// be accessing this buffer.
- VPxWorker *const worker = pbi->frame_worker_owner;
+ AVxWorker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
if (cm->show_frame) {
cm->current_video_frame++;
}
frame_worker_data->frame_decoded = 1;
frame_worker_data->frame_context_ready = 1;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
} else {
cm->last_width = cm->width;
cm->last_height = cm->height;
@@ -458,8 +458,8 @@
return retcode;
}
-int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
- VP10_COMMON *const cm = &pbi->common;
+int av1_get_raw_frame(AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
+ AV1_COMMON *const cm = &pbi->common;
int ret = -1;
if (pbi->ready_for_new_data == 1) return ret;
@@ -471,12 +471,12 @@
pbi->ready_for_new_data = 1;
*sd = *cm->frame_to_show;
ret = 0;
- vpx_clear_system_state();
+ aom_clear_system_state();
return ret;
}
-int vp10_get_frame_to_show(VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
- VP10_COMMON *const cm = &pbi->common;
+int av1_get_frame_to_show(AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
+ AV1_COMMON *const cm = &pbi->common;
if (!cm->show_frame || !cm->frame_to_show) return -1;
@@ -484,10 +484,10 @@
return 0;
}
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state) {
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ aom_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
// A chunk ending with a byte matching 0xc0 is an invalid chunk unless
// it is a super frame index. If the last byte of real video compression
// data is 0xc0 the encoder must add a 0 byte. If we have the marker but
@@ -508,7 +508,7 @@
// This chunk is marked as having a superframe index but doesn't have
// enough data for it, thus it's an invalid superframe index.
- if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
+ if (data_sz < index_sz) return AOM_CODEC_CORRUPT_FRAME;
{
const uint8_t marker2 =
@@ -517,7 +517,7 @@
// This chunk is marked as having a superframe index but doesn't have
// the matching marker byte at the front of the index therefore it's an
// invalid chunk.
- if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
+ if (marker != marker2) return AOM_CODEC_CORRUPT_FRAME;
}
{
@@ -545,5 +545,5 @@
*count = frames;
}
}
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
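The superframe trailer that av1_parse_superframe_index() walks is the VP9-style layout: a marker byte whose top three bits are 0b110 appears at both ends of the index, bracketing mag little-endian size bytes for each of up to eight frames. A standalone sketch of the same parse, with illustrative helper names that are not part of this change:

#include <stddef.h>
#include <stdint.h>

/* Returns 0 on success; *count stays 0 when no index is present. */
static int sketch_parse_superframe_trailer(const uint8_t *data, size_t data_sz,
                                           uint32_t sizes[8], int *count) {
  const uint8_t marker = data[data_sz - 1];
  *count = 0;
  if ((marker & 0xe0) == 0xc0) {                    /* 0b110xxxxx: an index */
    const uint32_t frames = (marker & 0x7) + 1;     /* low 3 bits */
    const uint32_t mag = ((marker >> 3) & 0x3) + 1; /* bytes per frame size */
    const size_t index_sz = 2 + mag * frames;       /* two markers + sizes */
    uint32_t i, j;
    const uint8_t *x;
    if (data_sz < index_sz) return -1;              /* truncated index */
    if (data[data_sz - index_sz] != marker) return -1; /* front marker */
    x = data + data_sz - index_sz + 1;
    for (i = 0; i < frames; ++i) {
      uint32_t sz = 0;
      for (j = 0; j < mag; ++j) sz |= (uint32_t)(*x++) << (j * 8);
      sizes[i] = sz;                                /* little-endian sizes */
    }
    *count = (int)frames;
  }
  return 0;
}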
diff --git a/av1/decoder/decoder.h b/av1/decoder/decoder.h
index 47a5a7b..b399768 100644
--- a/av1/decoder/decoder.h
+++ b/av1/decoder/decoder.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DECODER_H_
-#define VP10_DECODER_DECODER_H_
+#ifndef AV1_DECODER_DECODER_H_
+#define AV1_DECODER_DECODER_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
#include "av1/decoder/bitreader.h"
#include "aom_scale/yv12config.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/thread_common.h"
#include "av1/common/onyxc_int.h"
@@ -28,8 +28,8 @@
// TODO(hkuang): combine this with TileWorkerData.
typedef struct TileData {
- VP10_COMMON *cm;
- vp10_reader bit_reader;
+ AV1_COMMON *cm;
+ aom_reader bit_reader;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
DECLARE_ALIGNED(16, tran_low_t, dqcoeff[MAX_TX_SQUARE]);
@@ -37,14 +37,14 @@
} TileData;
typedef struct TileWorkerData {
- struct VP10Decoder *pbi;
- vp10_reader bit_reader;
+ struct AV1Decoder *pbi;
+ aom_reader bit_reader;
FRAME_COUNTS counts;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
DECLARE_ALIGNED(16, tran_low_t, dqcoeff[MAX_TX_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
- struct vpx_internal_error_info error_info;
+ struct aom_internal_error_info error_info;
} TileWorkerData;
typedef struct TileBufferDec {
@@ -55,10 +55,10 @@
int col; // only used with multi-threaded decoding
} TileBufferDec;
-typedef struct VP10Decoder {
+typedef struct AV1Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
- DECLARE_ALIGNED(16, VP10_COMMON, common);
+ DECLARE_ALIGNED(16, AV1_COMMON, common);
int ready_for_new_data;
@@ -68,9 +68,9 @@
// the same.
RefCntBuffer *cur_buf; // Current decoding frame buffer.
- VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
- VPxWorker lf_worker;
- VPxWorker *tile_workers;
+ AVxWorker *frame_worker_owner; // frame_worker that owns this pbi.
+ AVxWorker lf_worker;
+ AVxWorker *tile_workers;
TileWorkerData *tile_worker_data;
TileInfo *tile_worker_info;
int num_tile_workers;
@@ -80,9 +80,9 @@
TileBufferDec tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS];
- VP10LfSync lf_row_sync;
+ AV1LfSync lf_row_sync;
- vpx_decrypt_cb decrypt_cb;
+ aom_decrypt_cb decrypt_cb;
void *decrypt_state;
int max_threads;
@@ -95,24 +95,24 @@
int tile_col_size_bytes;
int dec_tile_row, dec_tile_col;
#endif // CONFIG_EXT_TILE
-} VP10Decoder;
+} AV1Decoder;
-int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
- const uint8_t **dest);
+int av1_receive_compressed_data(struct AV1Decoder *pbi, size_t size,
+ const uint8_t **dest);
-int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
+int av1_get_raw_frame(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd);
-int vp10_get_frame_to_show(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame);
+int av1_get_frame_to_show(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame);
-vpx_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
-
-vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
- VPX_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi,
+ AOM_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
+ AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+static INLINE uint8_t read_marker(aom_decrypt_cb decrypt_cb,
void *decrypt_state, const uint8_t *data) {
if (decrypt_cb) {
uint8_t marker;
@@ -124,14 +124,14 @@
// This function is exposed for use in tests, as well as the inlined function
// "read_marker".
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state);
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ aom_decrypt_cb decrypt_cb,
+ void *decrypt_state);
-struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
+struct AV1Decoder *av1_decoder_create(BufferPool *const pool);
-void vp10_decoder_remove(struct VP10Decoder *pbi);
+void av1_decoder_remove(struct AV1Decoder *pbi);
static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
BufferPool *const pool) {
@@ -149,9 +149,9 @@
}
#if CONFIG_EXT_REFS
-static INLINE int dec_is_ref_frame_buf(VP10Decoder *const pbi,
+static INLINE int dec_is_ref_frame_buf(AV1Decoder *const pbi,
RefCntBuffer *frame_buf) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
int i;
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
@@ -166,4 +166,4 @@
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODER_H_
+#endif // AV1_DECODER_DECODER_H_
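decrease_ref_count() above follows the usual pooled frame-buffer discipline: every holder of a buffer owns one count, and when the last reference drops the raw buffer is handed back through the application's release callback. A self-contained sketch of that pattern (the struct layout and names here are illustrative, not the ones in onyxc_int.h):

#include <assert.h>
#include <stddef.h>

typedef struct {
  int ref_count;                        /* outstanding references */
  void *cb_priv;                        /* application context */
  int (*release_fb_cb)(void *priv, void *fb);
  void *raw_frame_buffer;               /* storage owned by the app */
} sketch_ref_buf;

static void sketch_decrease_ref_count(sketch_ref_buf *buf) {
  if (buf == NULL) return;
  assert(buf->ref_count > 0);
  if (--buf->ref_count == 0)            /* last holder releases storage */
    buf->release_fb_cb(buf->cb_priv, buf->raw_frame_buffer);
}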
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index 0fba999..0935cdf 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/ans.h"
@@ -37,9 +37,9 @@
} while (0)
#if !CONFIG_ANS
-static INLINE int read_coeff(const vpx_prob *probs, int n, vp10_reader *r) {
+static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
int i, val = 0;
- for (i = 0; i < n; ++i) val = (val << 1) | vp10_read(r, probs[i]);
+ for (i = 0; i < n; ++i) val = (val << 1) | aom_read(r, probs[i]);
return val;
}
@@ -47,7 +47,7 @@
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq, int ctx, const int16_t *scan,
- const int16_t *nb, vp10_reader *r,
+ const int16_t *nb, aom_reader *r,
const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
@@ -57,7 +57,7 @@
dequant_val_type_nuq *dq_val,
#endif // CONFIG_NEW_QUANT
int ctx, const int16_t *scan, const int16_t *nb,
- vp10_reader *r)
+ aom_reader *r)
#endif
{
FRAME_COUNTS *counts = xd->counts;
@@ -69,9 +69,9 @@
#endif
int band, c = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
- const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
- const vpx_prob *prob;
+ const aom_prob *prob;
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[MAX_TX_SQUARE];
@@ -94,38 +94,38 @@
eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref];
}
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->bd > VPX_BITS_8) {
- if (xd->bd == VPX_BITS_10) {
- cat1_prob = vp10_cat1_prob_high10;
- cat2_prob = vp10_cat2_prob_high10;
- cat3_prob = vp10_cat3_prob_high10;
- cat4_prob = vp10_cat4_prob_high10;
- cat5_prob = vp10_cat5_prob_high10;
- cat6_prob = vp10_cat6_prob_high10;
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (xd->bd > AOM_BITS_8) {
+ if (xd->bd == AOM_BITS_10) {
+ cat1_prob = av1_cat1_prob_high10;
+ cat2_prob = av1_cat2_prob_high10;
+ cat3_prob = av1_cat3_prob_high10;
+ cat4_prob = av1_cat4_prob_high10;
+ cat5_prob = av1_cat5_prob_high10;
+ cat6_prob = av1_cat6_prob_high10;
} else {
- cat1_prob = vp10_cat1_prob_high12;
- cat2_prob = vp10_cat2_prob_high12;
- cat3_prob = vp10_cat3_prob_high12;
- cat4_prob = vp10_cat4_prob_high12;
- cat5_prob = vp10_cat5_prob_high12;
- cat6_prob = vp10_cat6_prob_high12;
+ cat1_prob = av1_cat1_prob_high12;
+ cat2_prob = av1_cat2_prob_high12;
+ cat3_prob = av1_cat3_prob_high12;
+ cat4_prob = av1_cat4_prob_high12;
+ cat5_prob = av1_cat5_prob_high12;
+ cat6_prob = av1_cat6_prob_high12;
}
} else {
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
}
#else
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
#endif
dq_shift = get_tx_scale(xd, tx_type, tx_size);
@@ -135,7 +135,7 @@
band = *band_translate++;
prob = coef_probs[band][ctx];
if (counts) ++eob_branch_count[band][ctx];
- if (!vp10_read(r, prob[EOB_CONTEXT_NODE])) {
+ if (!aom_read(r, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
}
@@ -144,7 +144,7 @@
dqv_val = &dq_val[band][0];
#endif // CONFIG_NEW_QUANT
- while (!vp10_read(r, prob[ZERO_CONTEXT_NODE])) {
+ while (!aom_read(r, prob[ZERO_CONTEXT_NODE])) {
INCREMENT_COUNT(ZERO_TOKEN);
dqv = dq[1];
token_cache[scan[c]] = 0;
@@ -158,14 +158,14 @@
#endif // CONFIG_NEW_QUANT
}
- if (!vp10_read(r, prob[ONE_CONTEXT_NODE])) {
+ if (!aom_read(r, prob[ONE_CONTEXT_NODE])) {
INCREMENT_COUNT(ONE_TOKEN);
token = ONE_TOKEN;
val = 1;
} else {
INCREMENT_COUNT(TWO_TOKEN);
- token = vp10_read_tree(r, vp10_coef_con_tree,
- vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
+ token = aom_read_tree(r, av1_coef_con_tree,
+ av1_pareto8_full[prob[PIVOT_NODE] - 1]);
switch (token) {
case TWO_TOKEN:
case THREE_TOKEN:
@@ -188,15 +188,15 @@
case CATEGORY6_TOKEN: {
const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (xd->bd) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, r);
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
break;
default: assert(0); return -1;
@@ -210,7 +210,7 @@
}
#if CONFIG_NEW_QUANT
- v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+ v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
#else
#if CONFIG_AOM_QM
@@ -221,15 +221,15 @@
#endif // CONFIG_NEW_QUANT
#if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VP9_HIGHBITDEPTH
- dqcoeff[scan[c]] = highbd_check_range((vp10_read_bit(r) ? -v : v), xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+ dqcoeff[scan[c]] = highbd_check_range((aom_read_bit(r) ? -v : v), xd->bd);
#else
- dqcoeff[scan[c]] = check_range(vp10_read_bit(r) ? -v : v);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ dqcoeff[scan[c]] = check_range(aom_read_bit(r) ? -v : v);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else
- dqcoeff[scan[c]] = vp10_read_bit(r) ? -v : v;
+ dqcoeff[scan[c]] = aom_read_bit(r) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
++c;
ctx = get_coef_context(nb, token_cache, c);
dqv = dq[1];
@@ -238,7 +238,7 @@
return c;
}
#else // !CONFIG_ANS
-static INLINE int read_coeff(const vpx_prob *const probs, int n,
+static INLINE int read_coeff(const aom_prob *const probs, int n,
struct AnsDecoder *const ans) {
int i, val = 0;
for (i = 0; i < n; ++i) val = (val << 1) | uabs_read(ans, probs[i]);
@@ -260,11 +260,11 @@
int band, c = 0;
int skip_eob = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
- const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
const rans_dec_lut(*coef_cdfs)[COEFF_CONTEXTS] =
fc->coef_cdfs[tx_size_ctx][type][ref];
- const vpx_prob *prob;
+ const aom_prob *prob;
const rans_dec_lut *cdf;
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
@@ -290,38 +290,38 @@
eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref];
}
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->bd > VPX_BITS_8) {
- if (xd->bd == VPX_BITS_10) {
- cat1_prob = vp10_cat1_prob_high10;
- cat2_prob = vp10_cat2_prob_high10;
- cat3_prob = vp10_cat3_prob_high10;
- cat4_prob = vp10_cat4_prob_high10;
- cat5_prob = vp10_cat5_prob_high10;
- cat6_prob = vp10_cat6_prob_high10;
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (xd->bd > AOM_BITS_8) {
+ if (xd->bd == AOM_BITS_10) {
+ cat1_prob = av1_cat1_prob_high10;
+ cat2_prob = av1_cat2_prob_high10;
+ cat3_prob = av1_cat3_prob_high10;
+ cat4_prob = av1_cat4_prob_high10;
+ cat5_prob = av1_cat5_prob_high10;
+ cat6_prob = av1_cat6_prob_high10;
} else {
- cat1_prob = vp10_cat1_prob_high12;
- cat2_prob = vp10_cat2_prob_high12;
- cat3_prob = vp10_cat3_prob_high12;
- cat4_prob = vp10_cat4_prob_high12;
- cat5_prob = vp10_cat5_prob_high12;
- cat6_prob = vp10_cat6_prob_high12;
+ cat1_prob = av1_cat1_prob_high12;
+ cat2_prob = av1_cat2_prob_high12;
+ cat3_prob = av1_cat3_prob_high12;
+ cat4_prob = av1_cat4_prob_high12;
+ cat5_prob = av1_cat5_prob_high12;
+ cat6_prob = av1_cat6_prob_high12;
}
} else {
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
}
#else
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
#endif
while (c < max_eob) {
@@ -371,15 +371,15 @@
case CATEGORY6_TOKEN: {
const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (xd->bd) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, ans);
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
break;
default: assert(0); return -1;
@@ -390,23 +390,23 @@
} break;
}
#if CONFIG_NEW_QUANT
- v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+ v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
#else
v = (val * dqv) >> dq_shift;
#endif // CONFIG_NEW_QUANT
#if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dqcoeff[scan[c]] =
highbd_check_range((uabs_read_bit(ans) ? -v : v), xd->bd);
#else
dqcoeff[scan[c]] = check_range(uabs_read_bit(ans) ? -v : v);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else
dqcoeff[scan[c]] = uabs_read_bit(ans) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
skip_eob = 0;
}
++c;
@@ -418,8 +418,8 @@
}
#endif // !CONFIG_ANS
-// TODO(slavarnway): Decode version of vp10_set_context. Modify
-// vp10_set_context
+// TODO(slavarnway): Decode version of av1_set_context. Modify
+// av1_set_context
// after testing is complete, then delete this version.
static void dec_set_contexts(const MACROBLOCKD *xd,
struct macroblockd_plane *pd, TX_SIZE tx_size,
@@ -459,8 +459,8 @@
}
}
-void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
- vp10_reader *r) {
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
+ aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -472,31 +472,31 @@
int n = mbmi->palette_mode_info.palette_size[plane != 0];
int i, j;
uint8_t *color_map = xd->plane[plane != 0].color_index_map;
- const vpx_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
- plane ? vp10_default_palette_uv_color_prob
- : vp10_default_palette_y_color_prob;
+ const aom_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+ plane ? av1_default_palette_uv_color_prob
+ : av1_default_palette_y_color_prob;
for (i = 0; i < rows; ++i) {
for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
color_ctx =
- vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
- color_idx = vp10_read_tree(r, vp10_palette_color_tree[n - 2],
- prob[n - 2][color_ctx]);
+ av1_get_palette_color_context(color_map, cols, i, j, n, color_order);
+ color_idx = aom_read_tree(r, av1_palette_color_tree[n - 2],
+ prob[n - 2][color_ctx]);
assert(color_idx >= 0 && color_idx < n);
color_map[i * cols + j] = color_order[color_idx];
}
}
}
-int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
- const scan_order *sc, int x, int y,
- TX_SIZE tx_size, TX_TYPE tx_type,
+int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+ const scan_order *sc, int x, int y, TX_SIZE tx_size,
+ TX_TYPE tx_type,
#if CONFIG_ANS
- struct AnsDecoder *const r,
+ struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif // CONFIG_ANS
- int seg_id) {
+ int seg_id) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const int16_t *const dequant = pd->seg_dequant[seg_id];
const int ctx =
@@ -528,7 +528,7 @@
#endif // !CONFIG_ANS
dec_set_contexts(xd, pd, tx_size, eob > 0, x, y);
/*
- vp10_set_contexts(xd, pd,
+ av1_set_contexts(xd, pd,
get_plane_block_size(xd->mi[0]->mbmi.sb_type, pd),
tx_size, eob > 0, x, y);
*/
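read_coeff() above assembles n probability-coded extra bits MSB-first, and the CATEGORY6 switch picks 14, 16 or 18 of them (minus skip_bits) for 8-, 10- and 12-bit depth before adding CAT6_MIN_VAL. A standalone sketch with a generic bit source standing in for aom_reader/AnsDecoder (names are illustrative):

#include <stdint.h>

typedef int (*coded_bit_fn)(void *ctx, uint8_t prob); /* aom_read-like */

/* MSB-first accumulation of n extra bits, one probability per bit. */
static int sketch_read_coeff(const uint8_t *probs, int n, coded_bit_fn rb,
                             void *ctx) {
  int i, val = 0;
  for (i = 0; i < n; ++i) val = (val << 1) | rb(ctx, probs[i]);
  return val;
}

/* CAT6 extra-bit count per bit depth, mirroring the switch above. */
static int sketch_cat6_bits(int bit_depth, int skip_bits) {
  return (bit_depth == 8 ? 14 : bit_depth == 10 ? 16 : 18) - skip_bits;
}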
diff --git a/av1/decoder/detokenize.h b/av1/decoder/detokenize.h
index 279c193..959e374 100644
--- a/av1/decoder/detokenize.h
+++ b/av1/decoder/detokenize.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DETOKENIZE_H_
-#define VP10_DECODER_DETOKENIZE_H_
+#ifndef AV1_DECODER_DETOKENIZE_H_
+#define AV1_DECODER_DETOKENIZE_H_
#include "av1/decoder/decoder.h"
#include "av1/common/ans.h"
@@ -19,20 +19,19 @@
extern "C" {
#endif
-void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
- vp10_reader *r);
-int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
- const scan_order *sc, int x, int y,
- TX_SIZE tx_size, TX_TYPE tx_type,
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane, aom_reader *r);
+int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+ const scan_order *sc, int x, int y, TX_SIZE tx_size,
+ TX_TYPE tx_type,
#if CONFIG_ANS
- struct AnsDecoder *const r,
+ struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif // CONFIG_ANS
- int seg_id);
+ int seg_id);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DETOKENIZE_H_
+#endif // AV1_DECODER_DETOKENIZE_H_
diff --git a/av1/decoder/dsubexp.c b/av1/decoder/dsubexp.c
index 146a1de..c0fee8d 100644
--- a/av1/decoder/dsubexp.c
+++ b/av1/decoder/dsubexp.c
@@ -20,11 +20,11 @@
return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
}
-static int decode_uniform(vp10_reader *r) {
+static int decode_uniform(aom_reader *r) {
const int l = 8;
const int m = (1 << l) - 190;
- const int v = vp10_read_literal(r, l - 1);
- return v < m ? v : (v << 1) - m + vp10_read_bit(r);
+ const int v = aom_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + aom_read_bit(r);
}
static int inv_remap_prob(int v, int m) {
@@ -57,24 +57,24 @@
}
}
-static int decode_term_subexp(vp10_reader *r) {
- if (!vp10_read_bit(r)) return vp10_read_literal(r, 4);
- if (!vp10_read_bit(r)) return vp10_read_literal(r, 4) + 16;
- if (!vp10_read_bit(r)) return vp10_read_literal(r, 5) + 32;
+static int decode_term_subexp(aom_reader *r) {
+ if (!aom_read_bit(r)) return aom_read_literal(r, 4);
+ if (!aom_read_bit(r)) return aom_read_literal(r, 4) + 16;
+ if (!aom_read_bit(r)) return aom_read_literal(r, 5) + 32;
return decode_uniform(r) + 64;
}
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p) {
- if (vp10_read(r, DIFF_UPDATE_PROB)) {
+void av1_diff_update_prob(aom_reader *r, aom_prob *p) {
+ if (aom_read(r, DIFF_UPDATE_PROB)) {
const int delp = decode_term_subexp(r);
- *p = (vpx_prob)inv_remap_prob(delp, *p);
+ *p = (aom_prob)inv_remap_prob(delp, *p);
}
}
-int vp10_read_primitive_symmetric(vp10_reader *r, unsigned int mag_bits) {
- if (vp10_read_bit(r)) {
- int s = vp10_read_bit(r);
- int x = vp10_read_literal(r, mag_bits) + 1;
+int aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits) {
+ if (aom_read_bit(r)) {
+ int s = aom_read_bit(r);
+ int x = aom_read_literal(r, mag_bits) + 1;
return (s > 0 ? -x : x);
} else {
return 0;
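decode_uniform() and decode_term_subexp() above realize a terminated subexponential code: three escape bits select literal ranges of 16, 16 and 32 values (0-15, 16-31, 32-63), and everything from 64 up falls through to a near-uniform tail of 190 symbols, for 254 representable deltas in all. With l = 8 the tail gives the first m = 2^l - 190 = 66 values a 7-bit code and the remaining 124 values an 8-bit code. A standalone sketch over a generic bit source (helper names are illustrative):

typedef int (*read_bit_fn)(void *ctx);

static int sketch_read_literal(read_bit_fn rb, void *ctx, int n) {
  int i, v = 0;
  for (i = 0; i < n; ++i) v = (v << 1) | rb(ctx);
  return v;
}

static int sketch_decode_uniform(read_bit_fn rb, void *ctx) {
  const int l = 8;
  const int m = (1 << l) - 190;               /* 66 short codewords */
  const int v = sketch_read_literal(rb, ctx, l - 1);
  return v < m ? v : (v << 1) - m + rb(ctx);  /* long codes take 1 more bit */
}

static int sketch_decode_term_subexp(read_bit_fn rb, void *ctx) {
  if (!rb(ctx)) return sketch_read_literal(rb, ctx, 4);       /*   0..15  */
  if (!rb(ctx)) return sketch_read_literal(rb, ctx, 4) + 16;  /*  16..31  */
  if (!rb(ctx)) return sketch_read_literal(rb, ctx, 5) + 32;  /*  32..63  */
  return sketch_decode_uniform(rb, ctx) + 64;                 /*  64..253 */
}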
diff --git a/av1/decoder/dsubexp.h b/av1/decoder/dsubexp.h
index b8980f7..8587395 100644
--- a/av1/decoder/dsubexp.h
+++ b/av1/decoder/dsubexp.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DSUBEXP_H_
-#define VP10_DECODER_DSUBEXP_H_
+#ifndef AV1_DECODER_DSUBEXP_H_
+#define AV1_DECODER_DSUBEXP_H_
#include "av1/decoder/bitreader.h"
@@ -17,7 +17,7 @@
extern "C" {
#endif
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p);
+void av1_diff_update_prob(aom_reader *r, aom_prob *p);
#ifdef __cplusplus
} // extern "C"
@@ -27,5 +27,5 @@
// 2 * 2^mag_bits + 1, symmetric around 0, where one bit is used to
// indicate 0 or non-zero, mag_bits bits are used to indicate magnitude
// and 1 more bit for the sign if non-zero.
-int vp10_read_primitive_symmetric(vp10_reader *r, unsigned int mag_bits);
-#endif // VP10_DECODER_DSUBEXP_H_
+int aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits);
+#endif // AV1_DECODER_DSUBEXP_H_
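As a worked example of aom_read_primitive_symmetric() above, take mag_bits = 2: the alphabet is {-4, ..., 4}, i.e. 2 * 2^2 + 1 = 9 symbols; zero costs one coded bit and each nonzero value costs 1 + 1 + mag_bits bits (nonzero flag, sign, magnitude minus one). A hypothetical encoder-side mirror, not part of this change, that emits exactly the bits the decoder consumes:

#include <stdlib.h>

typedef void (*write_bit_fn)(void *ctx, int bit);

static void sketch_write_literal(write_bit_fn wb, void *ctx, int v, int n) {
  int i;
  for (i = n - 1; i >= 0; --i) wb(ctx, (v >> i) & 1);  /* MSB first */
}

static void sketch_write_primitive_symmetric(write_bit_fn wb, void *ctx,
                                             int value,
                                             unsigned int mag_bits) {
  if (value == 0) {
    wb(ctx, 0);                      /* zero flag */
  } else {
    const int x = abs(value);
    wb(ctx, 1);                      /* nonzero */
    wb(ctx, value < 0);              /* sign: 1 means negative */
    sketch_write_literal(wb, ctx, x - 1, (int)mag_bits);
  }
}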
diff --git a/av1/decoder/dthread.c b/av1/decoder/dthread.c
index d9a2ce1..6f6a934 100644
--- a/av1/decoder/dthread.c
+++ b/av1/decoder/dthread.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/reconinter.h"
#include "av1/decoder/dthread.h"
#include "av1/decoder/decoder.h"
@@ -17,7 +17,7 @@
// #define DEBUG_THREAD
// TODO(hkuang): Clean up all the #ifdef in this file.
-void vp10_frameworker_lock_stats(VPxWorker *const worker) {
+void av1_frameworker_lock_stats(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_lock(&worker_data->stats_mutex);
@@ -26,7 +26,7 @@
#endif
}
-void vp10_frameworker_unlock_stats(VPxWorker *const worker) {
+void av1_frameworker_unlock_stats(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_unlock(&worker_data->stats_mutex);
@@ -35,7 +35,7 @@
#endif
}
-void vp10_frameworker_signal_stats(VPxWorker *const worker) {
+void av1_frameworker_signal_stats(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
@@ -59,8 +59,8 @@
#endif
// TODO(hkuang): Remove worker parameter as it is only used in debug code.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
- int row) {
+void av1_frameworker_wait(AVxWorker *const worker, RefCntBuffer *const ref_buf,
+ int row) {
#if CONFIG_MULTITHREAD
if (!ref_buf) return;
@@ -73,10 +73,10 @@
{
// Find the worker thread that owns the reference frame. If the reference
// frame has been fully decoded, it may not have an owner.
- VPxWorker *const ref_worker = ref_buf->frame_worker_owner;
+ AVxWorker *const ref_worker = ref_buf->frame_worker_owner;
FrameWorkerData *const ref_worker_data =
(FrameWorkerData *)ref_worker->data1;
- const VP10Decoder *const pbi = ref_worker_data->pbi;
+ const AV1Decoder *const pbi = ref_worker_data->pbi;
#ifdef DEBUG_THREAD
{
@@ -87,7 +87,7 @@
}
#endif
- vp10_frameworker_lock_stats(ref_worker);
+ av1_frameworker_lock_stats(ref_worker);
while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
ref_buf->buf.corrupted != 1) {
pthread_cond_wait(&ref_worker_data->stats_cond,
@@ -96,12 +96,12 @@
if (ref_buf->buf.corrupted == 1) {
FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
- vp10_frameworker_unlock_stats(ref_worker);
- vpx_internal_error(&worker_data->pbi->common.error,
- VPX_CODEC_CORRUPT_FRAME,
+ av1_frameworker_unlock_stats(ref_worker);
+ aom_internal_error(&worker_data->pbi->common.error,
+ AOM_CODEC_CORRUPT_FRAME,
"Worker %p failed to decode frame", worker);
}
- vp10_frameworker_unlock_stats(ref_worker);
+ av1_frameworker_unlock_stats(ref_worker);
}
#else
(void)worker;
@@ -111,9 +111,9 @@
#endif // CONFIG_MULTITHREAD
}
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row) {
#if CONFIG_MULTITHREAD
- VPxWorker *worker = buf->frame_worker_owner;
+ AVxWorker *worker = buf->frame_worker_owner;
#ifdef DEBUG_THREAD
{
@@ -123,27 +123,27 @@
}
#endif
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
buf->row = row;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
#else
(void)buf;
(void)row;
#endif // CONFIG_MULTITHREAD
}
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
- VPxWorker *const src_worker) {
+void av1_frameworker_copy_context(AVxWorker *const dst_worker,
+ AVxWorker *const src_worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
- VP10_COMMON *const src_cm = &src_worker_data->pbi->common;
- VP10_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+ AV1_COMMON *const src_cm = &src_worker_data->pbi->common;
+ AV1_COMMON *const dst_cm = &dst_worker_data->pbi->common;
int i;
// Wait until source frame's context is ready.
- vp10_frameworker_lock_stats(src_worker);
+ av1_frameworker_lock_stats(src_worker);
while (!src_worker_data->frame_context_ready) {
pthread_cond_wait(&src_worker_data->stats_cond,
&src_worker_data->stats_mutex);
@@ -153,10 +153,10 @@
? src_cm->current_frame_seg_map
: src_cm->last_frame_seg_map;
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
- vp10_frameworker_unlock_stats(src_worker);
+ av1_frameworker_unlock_stats(src_worker);
dst_cm->bit_depth = src_cm->bit_depth;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
#endif
#if CONFIG_EXT_REFS
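The lock/wait/broadcast trio above is a condition-variable progress monitor: the owning FrameWorker publishes the last fully decoded row of a buffer, and readers sleep on the owner's condition variable until the row they need is available, always re-checking the predicate under the mutex. A minimal sketch of the same handshake in bare pthreads (struct and field names are illustrative):

#include <pthread.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t cond;
  int row;  /* last fully decoded row; -1 before any progress */
} sketch_row_progress;

/* Reader side: block until at least `row` has been decoded. */
static void sketch_wait_for_row(sketch_row_progress *p, int row) {
  pthread_mutex_lock(&p->mutex);
  while (p->row < row) pthread_cond_wait(&p->cond, &p->mutex);
  pthread_mutex_unlock(&p->mutex);
}

/* Owner side: publish progress and wake every waiting reader. */
static void sketch_broadcast_row(sketch_row_progress *p, int row) {
  pthread_mutex_lock(&p->mutex);
  p->row = row;
  pthread_cond_broadcast(&p->cond);
  pthread_mutex_unlock(&p->mutex);
}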
diff --git a/av1/decoder/dthread.h b/av1/decoder/dthread.h
index ef548b6..84fb714 100644
--- a/av1/decoder/dthread.h
+++ b/av1/decoder/dthread.h
@@ -8,24 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DTHREAD_H_
-#define VP10_DECODER_DTHREAD_H_
+#ifndef AV1_DECODER_DTHREAD_H_
+#define AV1_DECODER_DTHREAD_H_
-#include "./vpx_config.h"
-#include "aom_util/vpx_thread.h"
-#include "aom/internal/vpx_codec_internal.h"
+#include "./aom_config.h"
+#include "aom_util/aom_thread.h"
+#include "aom/internal/aom_codec_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Common;
-struct VP10Decoder;
+struct AV1Common;
+struct AV1Decoder;
// WorkerData for the FrameWorker thread. It contains all of the worker's
// information and the decode structures needed to decode a frame.
typedef struct FrameWorkerData {
- struct VP10Decoder *pbi;
+ struct AV1Decoder *pbi;
const uint8_t *data;
const uint8_t *data_end;
size_t data_size;
@@ -48,27 +48,27 @@
int frame_decoded; // Finished decoding current frame.
} FrameWorkerData;
-void vp10_frameworker_lock_stats(VPxWorker *const worker);
-void vp10_frameworker_unlock_stats(VPxWorker *const worker);
-void vp10_frameworker_signal_stats(VPxWorker *const worker);
+void av1_frameworker_lock_stats(AVxWorker *const worker);
+void av1_frameworker_unlock_stats(AVxWorker *const worker);
+void av1_frameworker_signal_stats(AVxWorker *const worker);
// Wait until ref_buf has been decoded up to row, in real pixel units.
// Note: the worker may already have finished decoding ref_buf and released
// it in order to start decoding the next frame, so check whether the worker
// is still decoding ref_buf.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
- int row);
+void av1_frameworker_wait(AVxWorker *const worker, RefCntBuffer *const ref_buf,
+ int row);
// FrameWorker broadcasts its decoding progress so other workers that are
// waiting on it can resume decoding.
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row);
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row);
// Copy necessary decoding context from src worker to dst worker.
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
- VPxWorker *const src_worker);
+void av1_frameworker_copy_context(AVxWorker *const dst_worker,
+ AVxWorker *const src_worker);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DTHREAD_H_
+#endif // AV1_DECODER_DTHREAD_H_
diff --git a/av1/encoder/aq_complexity.c b/av1/encoder/aq_complexity.c
index 173556e..485e4c9 100644
--- a/av1/encoder/aq_complexity.c
+++ b/av1/encoder/aq_complexity.c
@@ -16,7 +16,7 @@
#include "av1/encoder/encodeframe.h"
#include "av1/common/seg_common.h"
#include "av1/encoder/segmentation.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/system_state.h"
#define AQ_C_SEGMENTS 5
@@ -40,18 +40,18 @@
#define DEFAULT_COMPLEXITY 64
-static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
+static int get_aq_c_strength(int q_index, aom_bit_depth_t bit_depth) {
// Approximate base quantizer (truncated to int)
- const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
+ const int base_quant = av1_ac_quant(q_index, 0, bit_depth) / 4;
return (base_quant > 10) + (base_quant > 25);
}
-void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_setup_in_frame_q_adj(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
struct segmentation *const seg = &cm->seg;
// Make SURE use of floating point in this function is safe.
- vpx_clear_system_state();
+ aom_clear_system_state();
if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
cpi->refresh_alt_ref_frame ||
@@ -62,22 +62,22 @@
// Clear down the segment map.
memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
// Segmentation only makes sense if the target bits per SB is above a
// threshold. Below this the overheads will usually outweigh any benefit.
if (cpi->rc.sb64_target_rate < 256) {
- vp10_disable_segmentation(seg);
+ av1_disable_segmentation(seg);
return;
}
- vp10_enable_segmentation(seg);
+ av1_enable_segmentation(seg);
// Select delta coding method.
seg->abs_delta = SEGMENT_DELTADATA;
// Default segment "Q" feature is disabled so it defaults to the baseline Q.
- vp10_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
+ av1_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
// Use some of the segments for in frame Q adjustment.
for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
@@ -85,7 +85,7 @@
if (segment == DEFAULT_AQ2_SEG) continue;
- qindex_delta = vp10_compute_qdelta_by_rate(
+ qindex_delta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, cm->base_qindex,
aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
@@ -97,8 +97,8 @@
qindex_delta = -cm->base_qindex + 1;
}
if ((cm->base_qindex + qindex_delta) > 0) {
- vp10_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
- vp10_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+ av1_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
+ av1_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
}
}
}
@@ -110,13 +110,13 @@
// Select a segment for the current block.
// The choice of segment for a block depends on the ratio of the projected
// bits for the block vs a target average and its spatial complexity.
-void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
- int mi_row, int mi_col, int projected_rate) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+ int mi_row, int mi_col, int projected_rate) {
+ AV1_COMMON *const cm = &cpi->common;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
- const int xmis = VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
- const int ymis = VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]);
+ const int xmis = AOMMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
+ const int ymis = AOMMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]);
int x, y;
int i;
unsigned char segment;
@@ -132,13 +132,13 @@
double low_var_thresh;
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
- vpx_clear_system_state();
- low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
+ aom_clear_system_state();
+ low_var_thresh = (cpi->oxcf.pass == 2) ? AOMMAX(cpi->twopass.mb_av_energy,
MIN_DEFAULT_LV_THRESH)
: DEFAULT_LV_THRESH;
- vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
- logvar = vp10_log_block_var(cpi, mb, bs);
+ av1_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
+ logvar = av1_log_block_var(cpi, mb, bs);
segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
for (i = 0; i < AQ_C_SEGMENTS; ++i) {
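Two rules drive the segment setup above: get_aq_c_strength() buckets the frame into strength 0, 1 or 2 from the truncated base AC quantizer (thresholds at 10 and 25), and each segment's qindex_delta is clamped so cm->base_qindex + qindex_delta stays strictly positive before the segment feature is enabled. A compact sketch of both rules as the hunks above imply them (av1_ac_quant() is assumed to return the AC dequant step for a qindex):

/* Strength 0, 1 or 2 from the truncated base AC quantizer / 4. */
static int sketch_aq_c_strength(int base_quant) {
  return (base_quant > 10) + (base_quant > 25);
}

/* Keep the segment's effective qindex strictly positive. */
static int sketch_clamp_segment_delta(int base_qindex, int qindex_delta) {
  if (base_qindex + qindex_delta <= 0) qindex_delta = -base_qindex + 1;
  return qindex_delta;
}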
diff --git a/av1/encoder/aq_complexity.h b/av1/encoder/aq_complexity.h
index db85406..465b8d72 100644
--- a/av1/encoder/aq_complexity.h
+++ b/av1/encoder/aq_complexity.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
-#define VP10_ENCODER_AQ_COMPLEXITY_H_
+#ifndef AV1_ENCODER_AQ_COMPLEXITY_H_
+#define AV1_ENCODER_AQ_COMPLEXITY_H_
#ifdef __cplusplus
extern "C" {
@@ -17,20 +17,20 @@
#include "av1/common/enums.h"
-struct VP10_COMP;
+struct AV1_COMP;
struct macroblock;
// Select a segment for the current Block.
-void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
- BLOCK_SIZE bs, int mi_row, int mi_col,
- int projected_rate);
+void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
+ BLOCK_SIZE bs, int mi_row, int mi_col,
+ int projected_rate);
// This function sets up a set of segments with delta Q values around
// the baseline frame quantizer.
-void vp10_setup_in_frame_q_adj(struct VP10_COMP *cpi);
+void av1_setup_in_frame_q_adj(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_AQ_COMPLEXITY_H_
+#endif // AV1_ENCODER_AQ_COMPLEXITY_H_
diff --git a/av1/encoder/aq_cyclicrefresh.c b/av1/encoder/aq_cyclicrefresh.c
index b7897f9..cbd8cc1 100644
--- a/av1/encoder/aq_cyclicrefresh.c
+++ b/av1/encoder/aq_cyclicrefresh.c
@@ -15,7 +15,7 @@
#include "av1/encoder/aq_cyclicrefresh.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/segmentation.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/system_state.h"
struct CYCLIC_REFRESH {
@@ -56,20 +56,20 @@
int qindex_delta[3];
};
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
size_t last_coded_q_map_size;
- CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
+ CYCLIC_REFRESH *const cr = aom_calloc(1, sizeof(*cr));
if (cr == NULL) return NULL;
- cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
+ cr->map = aom_calloc(mi_rows * mi_cols, sizeof(*cr->map));
if (cr->map == NULL) {
- vp10_cyclic_refresh_free(cr);
+ av1_cyclic_refresh_free(cr);
return NULL;
}
last_coded_q_map_size = mi_rows * mi_cols * sizeof(*cr->last_coded_q_map);
- cr->last_coded_q_map = vpx_malloc(last_coded_q_map_size);
+ cr->last_coded_q_map = aom_malloc(last_coded_q_map_size);
if (cr->last_coded_q_map == NULL) {
- vp10_cyclic_refresh_free(cr);
+ av1_cyclic_refresh_free(cr);
return NULL;
}
assert(MAXQ <= 255);
@@ -78,14 +78,14 @@
return cr;
}
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
- vpx_free(cr->map);
- vpx_free(cr->last_coded_q_map);
- vpx_free(cr);
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
+ aom_free(cr->map);
+ aom_free(cr->last_coded_q_map);
+ aom_free(cr);
}
// Check if we should turn off cyclic refresh based on bitrate condition.
-static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
+static int apply_cyclic_refresh_bitrate(const AV1_COMMON *cm,
const RATE_CONTROL *rc) {
// Turn off cyclic refresh if the bits available per frame are not sufficiently
// larger than the bit cost of segmentation. Segment map bit cost should scale
@@ -133,11 +133,11 @@
}
// Compute delta-q for the segment.
-static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
+static int compute_deltaq(const AV1_COMP *cpi, int q, double rate_factor) {
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const RATE_CONTROL *const rc = &cpi->rc;
- int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
- rate_factor, cpi->common.bit_depth);
+ int deltaq = av1_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+ rate_factor, cpi->common.bit_depth);
if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
deltaq = -cr->max_qdelta_perc * q / 100;
}
@@ -148,9 +148,9 @@
// from non-base segment. For now ignore effect of multiple segments
// (with different delta-q). Note this function is called in the postencode
// (called from rc_update_rate_correction_factors()).
-int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
- double correction_factor) {
- const VP10_COMMON *const cm = &cpi->common;
+int av1_cyclic_refresh_estimate_bits_at_q(const AV1_COMP *cpi,
+ double correction_factor) {
+ const AV1_COMMON *const cm = &cpi->common;
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int estimated_bits;
int mbs = cm->MBs;
@@ -162,16 +162,16 @@
// Take segment weighted average for estimated bits.
estimated_bits =
(int)((1.0 - weight_segment1 - weight_segment2) *
- vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
- correction_factor, cm->bit_depth) +
+ av1_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+ correction_factor, cm->bit_depth) +
weight_segment1 *
- vp10_estimate_bits_at_q(cm->frame_type,
- cm->base_qindex + cr->qindex_delta[1],
- mbs, correction_factor, cm->bit_depth) +
+ av1_estimate_bits_at_q(cm->frame_type,
+ cm->base_qindex + cr->qindex_delta[1],
+ mbs, correction_factor, cm->bit_depth) +
weight_segment2 *
- vp10_estimate_bits_at_q(cm->frame_type,
- cm->base_qindex + cr->qindex_delta[2],
- mbs, correction_factor, cm->bit_depth));
+ av1_estimate_bits_at_q(cm->frame_type,
+ cm->base_qindex + cr->qindex_delta[2],
+ mbs, correction_factor, cm->bit_depth));
return estimated_bits;
}
@@ -180,9 +180,9 @@
// rc_regulate_q() to set the base qp index.
// Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
// to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
-int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
- double correction_factor) {
- const VP10_COMMON *const cm = &cpi->common;
+int av1_cyclic_refresh_rc_bits_per_mb(const AV1_COMP *cpi, int i,
+ double correction_factor) {
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int bits_per_mb;
int num8x8bl = cm->MBs << 2;
@@ -196,29 +196,28 @@
// Compute delta-q corresponding to qindex i.
int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
// Take segment weighted average for bits per mb.
- bits_per_mb =
- (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
- correction_factor,
- cm->bit_depth) +
- weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
- correction_factor,
- cm->bit_depth));
+ bits_per_mb = (int)((1.0 - weight_segment) *
+ av1_rc_bits_per_mb(cm->frame_type, i,
+ correction_factor, cm->bit_depth) +
+ weight_segment *
+ av1_rc_bits_per_mb(cm->frame_type, i + deltaq,
+ correction_factor, cm->bit_depth));
return bits_per_mb;
}
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
- MB_MODE_INFO *const mbmi, int mi_row,
- int mi_col, BLOCK_SIZE bsize,
- int64_t rate, int64_t dist, int skip) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
+ MB_MODE_INFO *const mbmi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
+ int64_t rate, int64_t dist, int skip) {
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int bh = num_8x8_blocks_high_lookup[bsize];
- const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
- const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
+ const int xmis = AOMMIN(cm->mi_cols - mi_col, bw);
+ const int ymis = AOMMIN(cm->mi_rows - mi_row, bh);
const int block_index = mi_row * cm->mi_cols + mi_col;
const int refresh_this_block =
candidate_refresh_aq(cr, mbmi, rate, dist, bsize);
@@ -269,7 +268,7 @@
} else if (is_inter_block(mbmi) && skip &&
mbmi->segment_id <= CR_SEGMENT_ID_BOOST2) {
cr->last_coded_q_map[map_offset] =
- VPXMIN(clamp(cm->base_qindex + cr->qindex_delta[mbmi->segment_id],
+ AOMMIN(clamp(cm->base_qindex + cr->qindex_delta[mbmi->segment_id],
0, MAXQ),
cr->last_coded_q_map[map_offset]);
}
@@ -277,8 +276,8 @@
}
// Update the actual number of blocks to which the segment delta-q was applied.
-void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_postencode(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
unsigned char *const seg_map = cpi->segmentation_map;
int mi_row, mi_col;
@@ -297,7 +296,7 @@
}
// Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_set_golden_update(AV1_COMP *const cpi) {
RATE_CONTROL *const rc = &cpi->rc;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
// Set minimum gf_interval for GF update to a multiple (== 2) of refresh
@@ -313,8 +312,8 @@
// background has high motion, refresh the golden frame. Otherwise, if the
// golden reference is to be updated, check if we should NOT update the golden
// ref.
-void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_check_golden_update(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int mi_row, mi_col;
double fraction_low = 0.0;
@@ -356,7 +355,7 @@
// the resolution (resize_pending != 0).
if (cpi->resize_pending != 0 ||
(cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
- vp10_cyclic_refresh_set_golden_update(cpi);
+ av1_cyclic_refresh_set_golden_update(cpi);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
if (rc->frames_till_gf_update_due > rc->frames_to_key)
@@ -385,8 +384,8 @@
// 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock.
// Blocks labeled as BOOST1 may later get set to BOOST2 (during the
// encoding of the superblock).
-static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
unsigned char *const seg_map = cpi->segmentation_map;
int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
@@ -411,15 +410,15 @@
int mi_row = sb_row_index * cm->mib_size;
int mi_col = sb_col_index * cm->mib_size;
int qindex_thresh =
- cpi->oxcf.content == VPX_CONTENT_SCREEN
- ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
+ cpi->oxcf.content == AOM_CONTENT_SCREEN
+ ? av1_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
: 0;
assert(mi_row >= 0 && mi_row < cm->mi_rows);
assert(mi_col >= 0 && mi_col < cm->mi_cols);
bl_index = mi_row * cm->mi_cols + mi_col;
// Loop through all MI blocks in superblock and update map.
- xmis = VPXMIN(cm->mi_cols - mi_col, cm->mib_size);
- ymis = VPXMIN(cm->mi_rows - mi_row, cm->mib_size);
+ xmis = AOMMIN(cm->mi_cols - mi_col, cm->mib_size);
+ ymis = AOMMIN(cm->mi_rows - mi_row, cm->mib_size);
for (y = 0; y < ymis; y++) {
for (x = 0; x < xmis; x++) {
const int bl_index2 = bl_index + y * cm->mi_cols + x;
@@ -451,9 +450,9 @@
}
// Set cyclic refresh parameters.
-void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
cr->percent_refresh = 10;
cr->max_qdelta_perc = 50;
@@ -475,8 +474,8 @@
}
// Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
struct segmentation *const seg = &cm->seg;
@@ -487,7 +486,7 @@
// Set segmentation map to 0 and disable.
unsigned char *const seg_map = cpi->segmentation_map;
memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
- vp10_disable_segmentation(&cm->seg);
+ av1_disable_segmentation(&cm->seg);
if (cm->frame_type == KEY_FRAME) {
memset(cr->last_coded_q_map, MAXQ,
cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map));
@@ -497,37 +496,37 @@
} else {
int qindex_delta = 0;
int qindex2;
- const double q = vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
- vpx_clear_system_state();
+ const double q = av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
+ aom_clear_system_state();
// Set rate threshold to some multiple (set to 2 for now) of the target
// rate (target is given by sb64_target_rate and scaled by 256).
cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2;
// Distortion threshold, quadratic in Q, scale factor to be adjusted.
// q will not exceed 457, so (q * q) is within 32bit; see:
- // vp10_convert_qindex_to_q(), vp10_ac_quant(), ac_qlookup*[].
+ // av1_convert_qindex_to_q(), av1_ac_quant(), ac_qlookup*[].
cr->thresh_dist_sb = ((int64_t)(q * q)) << 2;
// Set up segmentation.
// Clear down the segment map.
- vp10_enable_segmentation(&cm->seg);
- vp10_clearall_segfeatures(seg);
+ av1_enable_segmentation(&cm->seg);
+ av1_clearall_segfeatures(seg);
// Select delta coding method.
seg->abs_delta = SEGMENT_DELTADATA;
// Note: setting temporal_update has no effect, as the seg-map coding method
// (temporal or spatial) is determined in
- // vp10_choose_segmap_coding_method(),
+ // av1_choose_segmap_coding_method(),
// based on the coding cost of each method. For error-resilient mode, the
// last_frame_seg_map is set to 0, so if temporal coding is used, it is
// relative to an all-zero previous map.
// seg->temporal_update = 0;
// Segment BASE "Q" feature is disabled so it defaults to the baseline Q.
- vp10_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
+ av1_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
// Use segment BOOST1 for in-frame Q adjustment.
- vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
+ av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
// Use segment BOOST2 for more aggressive in-frame Q adjustment.
- vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
+ av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
// Set the q delta for segment BOOST1.
qindex_delta = compute_deltaq(cpi, cm->base_qindex, cr->rate_ratio_qdelta);
@@ -536,29 +535,29 @@
// Compute rd-mult for segment BOOST1.
qindex2 = clamp(cm->base_qindex + cm->y_dc_delta_q + qindex_delta, 0, MAXQ);
- cr->rdmult = vp10_compute_rd_mult(cpi, qindex2);
+ cr->rdmult = av1_compute_rd_mult(cpi, qindex2);
- vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
+ av1_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
// Set a more aggressive (higher) q delta for segment BOOST2.
qindex_delta = compute_deltaq(
cpi, cm->base_qindex,
- VPXMIN(CR_MAX_RATE_TARGET_RATIO,
+ AOMMIN(CR_MAX_RATE_TARGET_RATIO,
0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta));
cr->qindex_delta[2] = qindex_delta;
- vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
+ av1_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
// Update the segmentation and refresh map.
cyclic_refresh_update_map(cpi);
}
}
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
return cr->rdmult;
}
-void vp10_cyclic_refresh_reset_resize(VP10_COMP *const cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
memset(cr->map, 0, cm->mi_rows * cm->mi_cols);
cr->sb_index = 0;
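
The setup hunk derives per-superblock rate and distortion thresholds from the target rate and the quantizer. A worked sketch of that arithmetic under assumed inputs (the sb64_target_rate and q values below are illustrative; q comes from av1_convert_qindex_to_q() in the real code):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical inputs: target bits per 64x64 superblock, and the
   * quantizer q as returned by av1_convert_qindex_to_q(). */
  const int sb64_target_rate = 300;
  const double q = 40.0;

  /* Rate threshold, as in the hunk: target rate scaled by 256 (<< 8)
   * and then by a further factor of 4 (<< 2). */
  const int64_t thresh_rate_sb = ((int64_t)sb64_target_rate << 8) << 2;

  /* Distortion threshold, quadratic in q; q stays below 457 in the
   * source, so q * q fits in 32 bits before the widening shift. */
  const int64_t thresh_dist_sb = (int64_t)(q * q) << 2;

  /* 307200 and 6400 for these inputs. */
  printf("thresh_rate_sb=%lld thresh_dist_sb=%lld\n",
         (long long)thresh_rate_sb, (long long)thresh_dist_sb);
  return 0;
}
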
diff --git a/av1/encoder/aq_cyclicrefresh.h b/av1/encoder/aq_cyclicrefresh.h
index 24491fc..3e59dfd 100644
--- a/av1/encoder/aq_cyclicrefresh.h
+++ b/av1/encoder/aq_cyclicrefresh.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
-#define VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#ifndef AV1_ENCODER_AQ_CYCLICREFRESH_H_
+#define AV1_ENCODER_AQ_CYCLICREFRESH_H_
#include "av1/common/blockd.h"
@@ -26,55 +26,55 @@
// Maximum rate target ratio for setting segment delta-qp.
#define CR_MAX_RATE_TARGET_RATIO 4.0
-struct VP10_COMP;
+struct AV1_COMP;
struct CYCLIC_REFRESH;
typedef struct CYCLIC_REFRESH CYCLIC_REFRESH;
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols);
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols);
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr);
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr);
// Estimate the bits, incorporating the delta-q from segment 1, after encoding
// the frame.
-int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
- double correction_factor);
+int av1_cyclic_refresh_estimate_bits_at_q(const struct AV1_COMP *cpi,
+ double correction_factor);
// Estimate the bits per mb, for a given q = i and a corresponding delta-q
// (for segment 1), prior to encoding the frame.
-int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
- double correction_factor);
+int av1_cyclic_refresh_rc_bits_per_mb(const struct AV1_COMP *cpi, int i,
+ double correction_factor);
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
- MB_MODE_INFO *const mbmi, int mi_row,
- int mi_col, BLOCK_SIZE bsize,
- int64_t rate, int64_t dist, int skip);
+void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
+ MB_MODE_INFO *const mbmi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
+ int64_t rate, int64_t dist, int skip);
// Update the segmentation map, and related quantities: cyclic refresh map,
// refresh sb_index, and target number of blocks to be refreshed.
-void vp10_cyclic_refresh_update_map(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update_map(struct AV1_COMP *const cpi);
// Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_postencode(struct AV1_COMP *const cpi);
// Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_set_golden_update(struct AV1_COMP *const cpi);
// Check if we should not update golden reference, based on past refresh stats.
-void vp10_cyclic_refresh_check_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_check_golden_update(struct AV1_COMP *const cpi);
// Set/update global/frame level refresh parameters.
-void vp10_cyclic_refresh_update_parameters(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update_parameters(struct AV1_COMP *const cpi);
// Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_setup(struct AV1_COMP *const cpi);
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
-void vp10_cyclic_refresh_reset_resize(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_reset_resize(struct AV1_COMP *const cpi);
static INLINE int cyclic_refresh_segment_id_boosted(int segment_id) {
return segment_id == CR_SEGMENT_ID_BOOST1 ||
@@ -94,4 +94,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#endif // AV1_ENCODER_AQ_CYCLICREFRESH_H_
diff --git a/av1/encoder/aq_variance.c b/av1/encoder/aq_variance.c
index 2a529a1..4e31c35 100644
--- a/av1/encoder/aq_variance.c
+++ b/av1/encoder/aq_variance.c
@@ -32,19 +32,19 @@
#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
-DECLARE_ALIGNED(16, static const uint8_t, vp10_all_zeros[MAX_SB_SIZE]) = { 0 };
-#if CONFIG_VP9_HIGHBITDEPTH
+DECLARE_ALIGNED(16, static const uint8_t, av1_all_zeros[MAX_SB_SIZE]) = { 0 };
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, static const uint16_t,
- vp10_highbd_all_zeros[MAX_SB_SIZE]) = { 0 };
+ av1_highbd_all_zeros[MAX_SB_SIZE]) = { 0 };
#endif
-unsigned int vp10_vaq_segment_id(int energy) {
+unsigned int av1_vaq_segment_id(int energy) {
ENERGY_IN_BOUNDS(energy);
return SEGMENT_ID(energy);
}
-void vp10_vaq_frame_setup(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+void av1_vaq_frame_setup(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
struct segmentation *seg = &cm->seg;
int i;
@@ -53,17 +53,17 @@
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
cpi->vaq_refresh = 1;
- vp10_enable_segmentation(seg);
- vp10_clearall_segfeatures(seg);
+ av1_enable_segmentation(seg);
+ av1_clearall_segfeatures(seg);
seg->abs_delta = SEGMENT_DELTADATA;
- vpx_clear_system_state();
+ aom_clear_system_state();
for (i = 0; i < MAX_SEGMENTS; ++i) {
int qindex_delta =
- vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
- rate_ratio[i], cm->bit_depth);
+ av1_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
+ rate_ratio[i], cm->bit_depth);
// We don't allow qindex 0 in a segment if the base value is not 0.
// Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
@@ -78,8 +78,8 @@
continue;
}
- vp10_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
- vp10_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
+ av1_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
+ av1_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
}
}
}
@@ -107,7 +107,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w, int h,
uint64_t *sse, uint64_t *sum) {
@@ -138,9 +138,9 @@
*sse = (unsigned int)sse_long;
*sum = (int)sum_long;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
+static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
@@ -153,54 +153,54 @@
const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
int avg;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
- CONVERT_TO_BYTEPTR(vp10_highbd_all_zeros), 0, bw, bh,
+ CONVERT_TO_BYTEPTR(av1_highbd_all_zeros), 0, bw, bh,
&sse, &avg);
sse >>= 2 * (xd->bd - 8);
avg >>= (xd->bd - 8);
} else {
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_all_zeros,
- 0, bw, bh, &sse, &avg);
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_all_zeros, 0,
+ bw, bh, &sse, &avg);
}
#else
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_all_zeros, 0,
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_all_zeros, 0,
bw, bh, &sse, &avg);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
var = sse - (((int64_t)avg * avg) / (bw * bh));
return (256 * var) / (bw * bh);
} else {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- CONVERT_TO_BYTEPTR(vp10_highbd_all_zeros), 0,
- &sse);
+ var =
+ cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ CONVERT_TO_BYTEPTR(av1_highbd_all_zeros), 0, &sse);
} else {
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- vp10_all_zeros, 0, &sse);
+ av1_all_zeros, 0, &sse);
}
#else
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- vp10_all_zeros, 0, &sse);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_all_zeros, 0, &sse);
+#endif // CONFIG_AOM_HIGHBITDEPTH
return (256 * var) >> num_pels_log2_lookup[bs];
}
}
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
unsigned int var = block_variance(cpi, x, bs);
- vpx_clear_system_state();
+ aom_clear_system_state();
return log(var + 1.0);
}
#define DEFAULT_E_MIDPOINT 10.0
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
double energy;
double energy_midpoint;
- vpx_clear_system_state();
+ aom_clear_system_state();
energy_midpoint =
(cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
- energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
+ energy = av1_log_block_var(cpi, x, bs) - energy_midpoint;
return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
}
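
The variance-AQ path measures block variance against an all-zeros reference, maps it into the natural-log domain, recenters it on a midpoint, and clamps it to the segment-energy range. A self-contained sketch of that mapping; the ENERGY_MIN/ENERGY_MAX bounds here are assumed stand-ins, while DEFAULT_E_MIDPOINT matches the diff:

#include <math.h>
#include <stdio.h>

/* Assumed segment-energy bounds; the real values live alongside
 * ENERGY_IN_BOUNDS() in aq_variance.c.  DEFAULT_E_MIDPOINT matches
 * the diff above. */
#define ENERGY_MIN (-4)
#define ENERGY_MAX (4)
#define DEFAULT_E_MIDPOINT 10.0

static int clampi(int v, int lo, int hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

/* Per-256-pixel variance -> natural-log domain -> recentred on the
 * midpoint -> clamped, mirroring av1_block_energy()/av1_log_block_var(). */
static int block_energy_from_var(unsigned int var) {
  const double energy = log(var + 1.0) - DEFAULT_E_MIDPOINT;
  return clampi((int)round(energy), ENERGY_MIN, ENERGY_MAX);
}

int main(void) {
  /* A flat block, a block near the midpoint (~e^10), and a busy block:
   * prints -4 0 4. */
  printf("%d %d %d\n", block_energy_from_var(0), block_energy_from_var(22026),
         block_energy_from_var(4000000));
  return 0;
}
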
diff --git a/av1/encoder/aq_variance.h b/av1/encoder/aq_variance.h
index a30a449..346b4c7 100644
--- a/av1/encoder/aq_variance.h
+++ b/av1/encoder/aq_variance.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_AQ_VARIANCE_H_
-#define VP10_ENCODER_AQ_VARIANCE_H_
+#ifndef AV1_ENCODER_AQ_VARIANCE_H_
+#define AV1_ENCODER_AQ_VARIANCE_H_
#include "av1/encoder/encoder.h"
@@ -17,14 +17,14 @@
extern "C" {
#endif
-unsigned int vp10_vaq_segment_id(int energy);
-void vp10_vaq_frame_setup(VP10_COMP *cpi);
+unsigned int av1_vaq_segment_id(int energy);
+void av1_vaq_frame_setup(AV1_COMP *cpi);
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_AQ_VARIANCE_H_
+#endif // AV1_ENCODER_AQ_VARIANCE_H_
diff --git a/av1/encoder/arm/neon/dct_neon.c b/av1/encoder/arm/neon/dct_neon.c
index 1d77bec..3626e79 100644
--- a/av1/encoder/arm/neon/dct_neon.c
+++ b/av1/encoder/arm/neon/dct_neon.c
@@ -10,24 +10,26 @@
#include <arm_neon.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/blockd.h"
#include "aom_dsp/txfm_common.h"
-void vp10_fdct8x8_quant_neon(
- const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
- int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void av1_fdct8x8_quant_neon(const int16_t *input, int stride,
+ int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr,
+ const int16_t *iscan_ptr) {
int16_t temp_buffer[64];
(void)coeff_ptr;
- vpx_fdct8x8_neon(input, temp_buffer, stride);
- vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
- quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
- dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+ aom_fdct8x8_neon(input, temp_buffer, stride);
+ av1_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+ quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+ dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
}
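
The renamed NEON entry point is a thin fusion wrapper: it runs the forward 8x8 DCT into a stack scratch buffer and hands the result to the fp quantizer, ignoring the caller's coeff_ptr. A sketch of the same composition pattern with trivial stand-in kernels (the real NEON kernels keep the signatures shown above):

#include <stdint.h>
#include <stdio.h>

typedef void (*fdct_fn)(const int16_t *in, int16_t *out, int stride);
typedef void (*quant_fn)(const int16_t *coeff, intptr_t n, int16_t *qcoeff);

/* Chain transform and quantizer through a scratch buffer, as
 * av1_fdct8x8_quant_neon() does. */
static void fused_fdct_quant(const int16_t *input, int stride, fdct_fn fdct,
                             quant_fn quant, int16_t *qcoeff) {
  int16_t temp[64]; /* one 8x8 block of transform coefficients */
  fdct(input, temp, stride);
  quant(temp, 64, qcoeff);
}

/* Trivial stand-ins: copy-through "transform" and divide-by-4 "quantizer". */
static void dummy_fdct(const int16_t *in, int16_t *out, int stride) {
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c) out[r * 8 + c] = in[r * stride + c];
}

static void dummy_quant(const int16_t *coeff, intptr_t n, int16_t *qcoeff) {
  for (intptr_t i = 0; i < n; ++i) qcoeff[i] = coeff[i] / 4;
}

int main(void) {
  int16_t src[64], q[64];
  for (int i = 0; i < 64; ++i) src[i] = (int16_t)(i * 4);
  fused_fdct_quant(src, 8, dummy_fdct, dummy_quant, q);
  printf("%d %d\n", q[0], q[63]); /* 0 63 */
  return 0;
}
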
diff --git a/av1/encoder/arm/neon/error_neon.c b/av1/encoder/arm/neon/error_neon.c
index 34805d3..65372b5 100644
--- a/av1/encoder/arm/neon/error_neon.c
+++ b/av1/encoder/arm/neon/error_neon.c
@@ -11,10 +11,10 @@
#include <arm_neon.h>
#include <assert.h>
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
-int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
- int block_size) {
+int64_t av1_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
+ int block_size) {
int64x2_t error = vdupq_n_s64(0);
assert(block_size >= 8);
diff --git a/av1/encoder/arm/neon/quantize_neon.c b/av1/encoder/arm/neon/quantize_neon.c
index db85b4d..5aeead1 100644
--- a/av1/encoder/arm/neon/quantize_neon.c
+++ b/av1/encoder/arm/neon/quantize_neon.c
@@ -12,7 +12,7 @@
#include <math.h>
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/quant_common.h"
#include "av1/common/seg_common.h"
@@ -21,13 +21,13 @@
#include "av1/encoder/quantize.h"
#include "av1/encoder/rd.h"
-void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
- int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan,
- const int16_t *iscan) {
+void av1_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
// TODO(jingning) Decide whether these arguments are still needed once the
// quantization process is finalized.
(void)zbin_ptr;
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 30699b4..305a672 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -12,10 +12,10 @@
#include <limits.h>
#include <stdio.h>
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
#include "aom_dsp/bitwriter_buffer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem_ops.h"
#include "aom_ports/system_state.h"
#include "aom_util/debug_util.h"
@@ -46,28 +46,28 @@
#include "av1/encoder/subexp.h"
#include "av1/encoder/tokenize.h"
-static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
+static const struct av1_token intra_mode_encodings[INTRA_MODES] = {
{ 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 },
{ 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
};
#if CONFIG_EXT_INTERP
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
{ { 0, 1 }, { 4, 3 }, { 6, 3 }, { 5, 3 }, { 7, 3 } };
#else
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
{ { 0, 1 }, { 2, 2 }, { 3, 2 } };
#endif // CONFIG_EXT_INTERP
#if CONFIG_EXT_PARTITION_TYPES
-static const struct vp10_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
+static const struct av1_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
{ 0, 1 }, { 4, 3 }, { 12, 4 }, { 7, 3 },
{ 10, 4 }, { 11, 4 }, { 26, 5 }, { 27, 5 }
};
#endif
-static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+static const struct av1_token partition_encodings[PARTITION_TYPES] = {
{ 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
};
#if !CONFIG_REF_MV
-static const struct vp10_token inter_mode_encodings[INTER_MODES] =
+static const struct av1_token inter_mode_encodings[INTER_MODES] =
#if CONFIG_EXT_INTER
{ { 2, 2 }, { 6, 3 }, { 0, 1 }, { 14, 4 }, { 15, 4 } };
#else
@@ -75,16 +75,16 @@
#endif // CONFIG_EXT_INTER
#endif
#if CONFIG_EXT_INTER
-static const struct vp10_token
+static const struct av1_token
inter_compound_mode_encodings[INTER_COMPOUND_MODES] = {
{ 2, 2 }, { 50, 6 }, { 51, 6 }, { 24, 5 }, { 52, 6 },
{ 53, 6 }, { 54, 6 }, { 55, 6 }, { 0, 1 }, { 7, 3 }
};
#endif // CONFIG_EXT_INTER
-static const struct vp10_token palette_size_encodings[] = {
+static const struct av1_token palette_size_encodings[] = {
{ 0, 1 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 30, 5 }, { 62, 6 }, { 63, 6 },
};
-static const struct vp10_token
+static const struct av1_token
palette_color_encodings[PALETTE_MAX_SIZE - 1][PALETTE_MAX_SIZE] = {
{ { 0, 1 }, { 1, 1 } }, // 2 colors
{ { 0, 1 }, { 2, 2 }, { 3, 2 } }, // 3 colors
@@ -113,84 +113,84 @@
{ 127, 7 } }, // 8 colors
};
-static const struct vp10_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
+static const struct av1_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
{ { 0, 1 }, { 1, 1 } }, // Max tx_size is 8X8
{ { 0, 1 }, { 2, 2 }, { 3, 2 } }, // Max tx_size is 16X16
{ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } }, // Max tx_size is 32X32
};
-static INLINE void write_uniform(vp10_writer *w, int n, int v) {
+static INLINE void write_uniform(aom_writer *w, int n, int v) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
if (l == 0) return;
if (v < m) {
- vp10_write_literal(w, v, l - 1);
+ aom_write_literal(w, v, l - 1);
} else {
- vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
- vp10_write_literal(w, (v - m) & 1, 1);
+ aom_write_literal(w, m + ((v - m) >> 1), l - 1);
+ aom_write_literal(w, (v - m) & 1, 1);
}
}
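
write_uniform() above implements truncated binary coding: with l the bit-width of n and m = 2^l - n, the first m symbols take l-1 bits and the remaining n - m symbols take l bits, keeping the code prefix-free and near-uniform. A standalone model that emits characters instead of driving an aom_writer (get_unsigned_bits() is re-implemented locally and matches libaom's helper for positive inputs):

#include <stdio.h>
#include <string.h>

static int get_unsigned_bits(unsigned int num) {
  int bits = 0;
  while (num) { ++bits; num >>= 1; }
  return bits;
}

/* Append v as a fixed-width big-endian bit string. */
static void put_literal(char *out, int v, int bits) {
  char buf[32];
  for (int b = 0; b < bits; ++b) buf[b] = (char)(((v >> (bits - 1 - b)) & 1) + '0');
  buf[bits] = '\0';
  strcat(out, buf);
}

/* Mirror of write_uniform(): symbols below m get l-1 bits, the rest l. */
static void write_uniform_str(char *out, int n, int v) {
  const int l = get_unsigned_bits(n);
  const int m = (1 << l) - n;
  if (l == 0) return;
  if (v < m) {
    put_literal(out, v, l - 1);
  } else {
    put_literal(out, m + ((v - m) >> 1), l - 1);
    put_literal(out, (v - m) & 1, 1);
  }
}

int main(void) {
  /* n = 5: symbols 0..2 code as 00/01/10, symbols 3..4 as 110/111. */
  for (int v = 0; v < 5; ++v) {
    char bits[32] = "";
    write_uniform_str(bits, 5, v);
    printf("v=%d -> %s\n", v, bits);
  }
  return 0;
}
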
#if CONFIG_EXT_TX
-static struct vp10_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
-static struct vp10_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
+static struct av1_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
+static struct av1_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
#else
-static struct vp10_token ext_tx_encodings[TX_TYPES];
+static struct av1_token ext_tx_encodings[TX_TYPES];
#endif // CONFIG_EXT_TX
#if CONFIG_GLOBAL_MOTION
-static struct vp10_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
+static struct av1_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
#endif // CONFIG_GLOBAL_MOTION
#if CONFIG_EXT_INTRA
-static struct vp10_token intra_filter_encodings[INTRA_FILTERS];
+static struct av1_token intra_filter_encodings[INTRA_FILTERS];
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_INTER
-static struct vp10_token interintra_mode_encodings[INTERINTRA_MODES];
+static struct av1_token interintra_mode_encodings[INTERINTRA_MODES];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static struct vp10_token motvar_encodings[MOTION_VARIATIONS];
+static struct av1_token motvar_encodings[MOTION_VARIATIONS];
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
-void vp10_encode_token_init(void) {
+void av1_encode_token_init(void) {
#if CONFIG_EXT_TX
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
- vp10_tokens_from_tree(ext_tx_inter_encodings[s], vp10_ext_tx_inter_tree[s]);
+ av1_tokens_from_tree(ext_tx_inter_encodings[s], av1_ext_tx_inter_tree[s]);
}
for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
- vp10_tokens_from_tree(ext_tx_intra_encodings[s], vp10_ext_tx_intra_tree[s]);
+ av1_tokens_from_tree(ext_tx_intra_encodings[s], av1_ext_tx_intra_tree[s]);
}
#else
- vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree);
+ av1_tokens_from_tree(ext_tx_encodings, av1_ext_tx_tree);
#endif // CONFIG_EXT_TX
#if CONFIG_EXT_INTRA
- vp10_tokens_from_tree(intra_filter_encodings, vp10_intra_filter_tree);
+ av1_tokens_from_tree(intra_filter_encodings, av1_intra_filter_tree);
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_INTER
- vp10_tokens_from_tree(interintra_mode_encodings, vp10_interintra_mode_tree);
+ av1_tokens_from_tree(interintra_mode_encodings, av1_interintra_mode_tree);
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
- vp10_tokens_from_tree(motvar_encodings, vp10_motvar_tree);
+ av1_tokens_from_tree(motvar_encodings, av1_motvar_tree);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
#if CONFIG_GLOBAL_MOTION
- vp10_tokens_from_tree(global_motion_types_encodings,
- vp10_global_motion_types_tree);
+ av1_tokens_from_tree(global_motion_types_encodings,
+ av1_global_motion_types_tree);
#endif // CONFIG_GLOBAL_MOTION
}
-static void write_intra_mode(vp10_writer *w, PREDICTION_MODE mode,
- const vpx_prob *probs) {
- vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+static void write_intra_mode(aom_writer *w, PREDICTION_MODE mode,
+ const aom_prob *probs) {
+ av1_write_token(w, av1_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
#if CONFIG_EXT_INTER
-static void write_interintra_mode(vp10_writer *w, INTERINTRA_MODE mode,
- const vpx_prob *probs) {
- vp10_write_token(w, vp10_interintra_mode_tree, probs,
- &interintra_mode_encodings[mode]);
+static void write_interintra_mode(aom_writer *w, INTERINTRA_MODE mode,
+ const aom_prob *probs) {
+ av1_write_token(w, av1_interintra_mode_tree, probs,
+ &interintra_mode_encodings[mode]);
}
#endif // CONFIG_EXT_INTER
-static void write_inter_mode(VP10_COMMON *cm, vp10_writer *w,
+static void write_inter_mode(AV1_COMMON *cm, aom_writer *w,
PREDICTION_MODE mode,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
int is_compound,
@@ -198,53 +198,53 @@
const int16_t mode_ctx) {
#if CONFIG_REF_MV
const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
- const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
+ const aom_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
#if CONFIG_EXT_INTER
- vp10_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
+ aom_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
if (!is_compound && (mode == NEWMV || mode == NEWFROMNEARMV))
- vp10_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
+ aom_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
if (mode != NEWMV && mode != NEWFROMNEARMV) {
#else
- vp10_write(w, mode != NEWMV, newmv_prob);
+ aom_write(w, mode != NEWMV, newmv_prob);
if (mode != NEWMV) {
#endif // CONFIG_EXT_INTER
const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
- const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
+ const aom_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
assert(mode == ZEROMV);
return;
}
- vp10_write(w, mode != ZEROMV, zeromv_prob);
+ aom_write(w, mode != ZEROMV, zeromv_prob);
if (mode != ZEROMV) {
int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
- vpx_prob refmv_prob;
+ aom_prob refmv_prob;
if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
refmv_prob = cm->fc->refmv_prob[refmv_ctx];
- vp10_write(w, mode != NEARESTMV, refmv_prob);
+ aom_write(w, mode != NEARESTMV, refmv_prob);
}
}
#else
- const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
+ const aom_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
assert(is_inter_mode(mode));
- vp10_write_token(w, vp10_inter_mode_tree, inter_probs,
- &inter_mode_encodings[INTER_OFFSET(mode)]);
+ av1_write_token(w, av1_inter_mode_tree, inter_probs,
+ &inter_mode_encodings[INTER_OFFSET(mode)]);
#endif
}
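
In the REF_MV path, write_inter_mode() codes the prediction mode as a cascade of binary flags, and the all-zero context bit lets the encoder stop signalling entirely once ZEROMV is implied. A trace of which flags get coded per mode (probabilities and contexts elided; the EXT_INTER branches are omitted):

#include <stdio.h>

/* Local stand-ins for the libaom prediction-mode enum. */
typedef enum { NEARESTMV, NEARMV, ZEROMV, NEWMV } PredMode;

static void trace_inter_mode(PredMode mode, int all_zero_flag) {
  /* First flag: NEWMV vs everything else. */
  printf("mode=%d: is_not_newmv=%d", (int)mode, mode != NEWMV);
  if (mode != NEWMV) {
    if (all_zero_flag) {
      /* The context guarantees ZEROMV: nothing further is coded. */
      printf(" (all-zero ctx, stop)");
    } else {
      printf(" is_not_zeromv=%d", mode != ZEROMV);
      if (mode != ZEROMV) printf(" is_not_nearestmv=%d", mode != NEARESTMV);
    }
  }
  printf("\n");
}

int main(void) {
  trace_inter_mode(NEWMV, 0);
  trace_inter_mode(ZEROMV, 0);
  trace_inter_mode(NEARESTMV, 0);
  trace_inter_mode(NEARMV, 0);
  return 0;
}
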
#if CONFIG_REF_MV
-static void write_drl_idx(const VP10_COMMON *cm, const MB_MODE_INFO *mbmi,
- const MB_MODE_INFO_EXT *mbmi_ext, vp10_writer *w) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+static void write_drl_idx(const AV1_COMMON *cm, const MB_MODE_INFO *mbmi,
+ const MB_MODE_INFO_EXT *mbmi_ext, aom_writer *w) {
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
assert(mbmi->ref_mv_idx < 3);
@@ -253,10 +253,10 @@
for (idx = 0; idx < 2; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- vp10_write(w, mbmi->ref_mv_idx != idx, drl_prob);
+ aom_write(w, mbmi->ref_mv_idx != idx, drl_prob);
if (mbmi->ref_mv_idx == idx) return;
}
}
@@ -269,10 +269,10 @@
for (idx = 1; idx < 3; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- vp10_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
+ aom_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
if (mbmi->ref_mv_idx == (idx - 1)) return;
}
}
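
write_drl_idx() signals the reference-MV index with unary-style "not this slot" flags, and a flag is only coded when the candidate list is long enough to leave a real choice. A model of the NEWMV-side loop that counts how many flags would be written:

#include <stdio.h>

/* Mirrors the first loop of write_drl_idx(): flag idx 0, then idx 1,
 * each coded only if ref_mv_count > idx + 1; stop once the index is hit. */
static int drl_flags_coded(int ref_mv_count, int ref_mv_idx) {
  int flags = 0;
  for (int idx = 0; idx < 2; ++idx) {
    if (ref_mv_count > idx + 1) {
      ++flags; /* aom_write(w, ref_mv_idx != idx, drl_prob) in the source */
      if (ref_mv_idx == idx) return flags;
    }
  }
  return flags;
}

int main(void) {
  printf("%d %d %d\n", drl_flags_coded(1, 0), /* list of 1: nothing coded  */
         drl_flags_coded(3, 0),               /* first flag terminates     */
         drl_flags_coded(3, 2));              /* two flags, index implied  */
  return 0;                                   /* prints: 0 1 2             */
}
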
@@ -282,40 +282,40 @@
#endif
#if CONFIG_EXT_INTER
-static void write_inter_compound_mode(VP10_COMMON *cm, vp10_writer *w,
+static void write_inter_compound_mode(AV1_COMMON *cm, aom_writer *w,
PREDICTION_MODE mode,
const int16_t mode_ctx) {
- const vpx_prob *const inter_compound_probs =
+ const aom_prob *const inter_compound_probs =
cm->fc->inter_compound_mode_probs[mode_ctx];
assert(is_inter_compound_mode(mode));
- vp10_write_token(w, vp10_inter_compound_mode_tree, inter_compound_probs,
- &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
+ av1_write_token(w, av1_inter_compound_mode_tree, inter_compound_probs,
+ &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
}
#endif // CONFIG_EXT_INTER
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
int max) {
- vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
+ aom_wb_write_literal(wb, data, get_unsigned_bits(max));
}
-static void prob_diff_update(const vpx_tree_index *tree,
- vpx_prob probs[/*n - 1*/],
+static void prob_diff_update(const aom_tree_index *tree,
+ aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/], int n,
- vp10_writer *w) {
+ aom_writer *w) {
int i;
unsigned int branch_ct[32][2];
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i)
- vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
+ av1_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
-static int prob_diff_update_savings(const vpx_tree_index *tree,
- vpx_prob probs[/*n - 1*/],
+static int prob_diff_update_savings(const aom_tree_index *tree,
+ aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/],
int n) {
int i;
@@ -324,17 +324,17 @@
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i) {
- savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
+ savings += av1_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
}
return savings;
}
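
prob_diff_update() re-derives branch probabilities from the frame's counts and forwards each to a conditional update that only fires when the estimated bit savings beat the signalling cost. A schematic of that idea with an explicit entropy-based cost model; the real encoder's cost accounting (subexp.c) differs, and the 8-bit update cost below is a stand-in:

#include <math.h>
#include <stdio.h>

/* Bits to code ct0 zero-branches and ct1 one-branches at probability
 * prob256/256 for the zero branch. */
static double binary_cost_bits(unsigned ct0, unsigned ct1, int prob256) {
  const double p = prob256 / 256.0;
  double cost = 0.0;
  if (ct0) cost -= ct0 * log2(p);
  if (ct1) cost -= ct1 * log2(1.0 - p);
  return cost;
}

/* Schematic of av1_cond_prob_diff_update(): re-fit the probability to
 * the observed counts and adopt it only when the saved bits exceed the
 * (here illustrative) cost of signalling the new value. */
static int maybe_update_prob(unsigned ct0, unsigned ct1, int *prob256) {
  const double kUpdateCostBits = 8.0; /* stand-in signalling cost */
  if (ct0 + ct1 == 0) return 0;
  int fitted = (int)(256.0 * ct0 / (ct0 + ct1) + 0.5);
  if (fitted < 1) fitted = 1;
  if (fitted > 255) fitted = 255;
  const double savings = binary_cost_bits(ct0, ct1, *prob256) -
                         binary_cost_bits(ct0, ct1, fitted) - kUpdateCostBits;
  if (savings <= 0) return 0;
  *prob256 = fitted;
  return 1;
}

int main(void) {
  int prob = 128; /* stale estimate vs. a heavily skewed count */
  printf("updated=%d prob=%d\n", maybe_update_prob(900, 100, &prob), prob);
  return 0;      /* prints: updated=1 prob=230 */
}
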
#if CONFIG_VAR_TX
-static void write_tx_size_vartx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_tx_size_vartx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
- int blk_row, int blk_col, vp10_writer *w) {
+ int blk_row, int blk_col, aom_writer *w) {
const int tx_row = blk_row >> 1;
const int tx_col = blk_col >> 1;
int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
@@ -348,14 +348,14 @@
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
- vp10_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
+ aom_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
txfm_partition_update(xd->above_txfm_context + tx_col,
xd->left_txfm_context + tx_row, tx_size);
} else {
const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
int bsl = b_width_log2_lookup[bsize];
int i;
- vp10_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
+ aom_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
if (tx_size == TX_8X8) {
txfm_partition_update(xd->above_txfm_context + tx_col,
@@ -373,17 +373,17 @@
}
}
-static void update_txfm_partition_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_txfm_partition_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int k;
for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
- vp10_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
- counts->txfm_partition[k]);
+ av1_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
+ counts->txfm_partition[k]);
}
#endif
-static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- vp10_writer *w) {
+static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
// For sub8x8 blocks the tx_size symbol does not need to be sent
@@ -401,89 +401,87 @@
IMPLIES(is_rect_tx(tx_size), tx_size == max_txsize_rect_lookup[bsize]));
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
- vp10_write_token(w, vp10_tx_size_tree[tx_size_cat],
- cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
- &tx_size_encodings[tx_size_cat][coded_tx_size]);
+ av1_write_token(w, av1_tx_size_tree[tx_size_cat],
+ cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
+ &tx_size_encodings[tx_size_cat][coded_tx_size]);
}
}
#if CONFIG_REF_MV
-static void update_inter_mode_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_inter_mode_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int i;
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->newmv_prob[i],
- counts->newmv_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->newmv_prob[i], counts->newmv_mode[i]);
for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
- counts->zeromv_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
+ counts->zeromv_mode[i]);
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->refmv_prob[i],
- counts->refmv_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->refmv_prob[i], counts->refmv_mode[i]);
for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
#if CONFIG_EXT_INTER
- vp10_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
+ av1_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
#endif // CONFIG_EXT_INTER
}
#endif
#if CONFIG_EXT_INTER
-static void update_inter_compound_mode_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_inter_compound_mode_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i;
int savings = 0;
int do_update = 0;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
savings += prob_diff_update_savings(
- vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+ av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
prob_diff_update(
- vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+ av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES, w);
}
}
}
#endif // CONFIG_EXT_INTER
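
The hunk above shows the grouped-update pattern used throughout this file: per-context savings are summed, and one flag coded at GROUP_DIFF_UPDATE_PROB enables or disables the whole group, with the threshold set to exactly the extra cost of coding that flag as one rather than zero. A sketch of the gate; costs are in abstract fixed-point bit units and the values are illustrative:

#include <stdio.h>

/* Send the update group only when it pays for its own signalling:
 * total savings must exceed cost_one - cost_zero of the group flag. */
static int send_group(const int *savings, int n, int cost_one, int cost_zero) {
  int total = 0;
  for (int i = 0; i < n; ++i) total += savings[i];
  return total > (cost_one - cost_zero);
}

int main(void) {
  const int savings[] = { 40, -10, 25 };           /* per-context estimates */
  printf("%d\n", send_group(savings, 3, 300, 20)); /* 55 <= 280, prints 0   */
  return 0;
}
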
-static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- int segment_id, const MODE_INFO *mi, vp10_writer *w) {
+static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ int segment_id, const MODE_INFO *mi, aom_writer *w) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int skip = mi->mbmi.skip;
- vp10_write(w, skip, vp10_get_skip_prob(cm, xd));
+ aom_write(w, skip, av1_get_skip_prob(cm, xd));
return skip;
}
}
-static void update_skip_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int k;
for (k = 0; k < SKIP_CONTEXTS; ++k)
- vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
+ av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}
-static void update_switchable_interp_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
- prob_diff_update(vp10_switchable_interp_tree,
+ prob_diff_update(av1_switchable_interp_tree,
cm->fc->switchable_interp_prob[j],
counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
#if CONFIG_EXT_TX
-static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
@@ -492,16 +490,16 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_inter_ext_tx_for_txsize[s][i]) continue;
savings += prob_diff_update_savings(
- vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+ av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s]);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_inter_ext_tx_for_txsize[s][i]) continue;
prob_diff_update(
- vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+ av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s], w);
}
}
@@ -514,17 +512,17 @@
if (!use_intra_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < INTRA_MODES; ++j)
savings += prob_diff_update_savings(
- vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+ av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s]);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_intra_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < INTRA_MODES; ++j)
prob_diff_update(
- vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+ av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s], w);
}
}
@@ -533,9 +531,9 @@
#else
-static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int savings = 0;
@@ -543,43 +541,43 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
savings += prob_diff_update_savings(
- vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
cm->counts.intra_ext_tx[i][j], TX_TYPES);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ prob_diff_update(av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
}
}
savings = 0;
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
savings +=
- prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ prob_diff_update_savings(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
cm->counts.inter_ext_tx[i], TX_TYPES);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ prob_diff_update(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
cm->counts.inter_ext_tx[i], TX_TYPES, w);
}
}
}
#endif // CONFIG_EXT_TX
-static void pack_palette_tokens(vp10_writer *w, const TOKENEXTRA **tp, int n,
+static void pack_palette_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
int num) {
int i;
const TOKENEXTRA *p = *tp;
for (i = 0; i < num; ++i) {
- vp10_write_token(w, vp10_palette_color_tree[n - 2], p->context_tree,
- &palette_color_encodings[n - 2][p->token]);
+ av1_write_token(w, av1_palette_color_tree[n - 2], p->context_tree,
+ &palette_color_encodings[n - 2][p->token]);
++p;
}
@@ -587,25 +585,25 @@
}
#if CONFIG_SUPERTX
-static void update_supertx_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_supertx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int savings = 0;
int do_update = 0;
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
for (j = 1; j < TX_SIZES; ++j) {
- savings += vp10_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
- cm->counts.supertx[i][j]);
+ savings += av1_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
+ cm->counts.supertx[i][j]);
}
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
for (j = 1; j < TX_SIZES; ++j) {
- vp10_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
- cm->counts.supertx[i][j]);
+ av1_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
+ cm->counts.supertx[i][j]);
}
}
}
@@ -613,9 +611,9 @@
#endif // CONFIG_SUPERTX
#if !CONFIG_ANS
-static void pack_mb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+static void pack_mb_tokens(aom_writer *w, const TOKENEXTRA **tp,
const TOKENEXTRA *const stop,
- vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+ aom_bit_depth_t bit_depth, const TX_SIZE tx) {
const TOKENEXTRA *p = *tp;
#if CONFIG_VAR_TX
int count = 0;
@@ -624,39 +622,39 @@
while (p < stop && p->token != EOSB_TOKEN) {
const int t = p->token;
- const struct vp10_token *const a = &vp10_coef_encodings[t];
+ const struct av1_token *const a = &av1_coef_encodings[t];
int v = a->value;
int n = a->len;
-#if CONFIG_VP9_HIGHBITDEPTH
- const vp10_extra_bit *b;
- if (bit_depth == VPX_BITS_12)
- b = &vp10_extra_bits_high12[t];
- else if (bit_depth == VPX_BITS_10)
- b = &vp10_extra_bits_high10[t];
+#if CONFIG_AOM_HIGHBITDEPTH
+ const av1_extra_bit *b;
+ if (bit_depth == AOM_BITS_12)
+ b = &av1_extra_bits_high12[t];
+ else if (bit_depth == AOM_BITS_10)
+ b = &av1_extra_bits_high10[t];
else
- b = &vp10_extra_bits[t];
+ b = &av1_extra_bits[t];
#else
- const vp10_extra_bit *const b = &vp10_extra_bits[t];
+ const av1_extra_bit *const b = &av1_extra_bits[t];
(void)bit_depth;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* skip one or two nodes */
if (p->skip_eob_node)
n -= p->skip_eob_node;
else
- vp10_write(w, t != EOB_TOKEN, p->context_tree[0]);
+ aom_write(w, t != EOB_TOKEN, p->context_tree[0]);
if (t != EOB_TOKEN) {
- vp10_write(w, t != ZERO_TOKEN, p->context_tree[1]);
+ aom_write(w, t != ZERO_TOKEN, p->context_tree[1]);
if (t != ZERO_TOKEN) {
- vp10_write(w, t != ONE_TOKEN, p->context_tree[2]);
+ aom_write(w, t != ONE_TOKEN, p->context_tree[2]);
if (t != ONE_TOKEN) {
int len = UNCONSTRAINED_NODES - p->skip_eob_node;
- vp10_write_tree(w, vp10_coef_con_tree,
- vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
- n - len, 0);
+ av1_write_tree(w, av1_coef_con_tree,
+ av1_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+ n - len, 0);
}
}
}
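
pack_mb_tokens() codes each coefficient token through a short binary cascade (EOB?, ZERO?, ONE?) before falling back to a tree for the larger categories, and skip_eob_node suppresses the first decision when context already implies it. A trace of that control flow, with the category tree and extra bits reduced to labels:

#include <stdio.h>

/* Local stand-ins for the coefficient token ids. */
typedef enum { EOB_TOKEN, ZERO_TOKEN, ONE_TOKEN, BIGGER_TOKEN } Tok;

static void trace_token(Tok t, int skip_eob_node) {
  if (!skip_eob_node) printf("bit(is_not_eob=%d) ", t != EOB_TOKEN);
  if (t != EOB_TOKEN) {
    printf("bit(is_not_zero=%d) ", t != ZERO_TOKEN);
    if (t != ZERO_TOKEN) {
      printf("bit(is_not_one=%d) ", t != ONE_TOKEN);
      /* Larger tokens continue into av1_coef_con_tree with the pareto
       * probabilities, then extra bits and a sign bit. */
      if (t != ONE_TOKEN) printf("tree(category...) ");
    }
  }
  printf("\n");
}

int main(void) {
  trace_token(ZERO_TOKEN, 0);
  trace_token(BIGGER_TOKEN, 1); /* EOB implied; cascade starts at ZERO */
  return 0;
}
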
@@ -679,13 +677,13 @@
skip_bits--;
assert(!bb);
} else {
- vp10_write(w, bb, pb[i >> 1]);
+ aom_write(w, bb, pb[i >> 1]);
}
i = b->tree[i + bb];
} while (n);
}
- vp10_write_bit(w, e & 1);
+ aom_write_bit(w, e & 1);
}
++p;
@@ -702,7 +700,7 @@
// coder.
static void pack_mb_tokens(struct BufAnsCoder *ans, const TOKENEXTRA **tp,
const TOKENEXTRA *const stop,
- vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+ aom_bit_depth_t bit_depth, const TX_SIZE tx) {
const TOKENEXTRA *p = *tp;
#if CONFIG_VAR_TX
int count = 0;
@@ -711,18 +709,18 @@
while (p < stop && p->token != EOSB_TOKEN) {
const int t = p->token;
-#if CONFIG_VP9_HIGHBITDEPTH
- const vp10_extra_bit *b;
- if (bit_depth == VPX_BITS_12)
- b = &vp10_extra_bits_high12[t];
- else if (bit_depth == VPX_BITS_10)
- b = &vp10_extra_bits_high10[t];
+#if CONFIG_AOM_HIGHBITDEPTH
+ const av1_extra_bit *b;
+ if (bit_depth == AOM_BITS_12)
+ b = &av1_extra_bits_high12[t];
+ else if (bit_depth == AOM_BITS_10)
+ b = &av1_extra_bits_high10[t];
else
- b = &vp10_extra_bits[t];
+ b = &av1_extra_bits[t];
#else
- const vp10_extra_bit *const b = &vp10_extra_bits[t];
+ const av1_extra_bit *const b = &av1_extra_bits[t];
(void)bit_depth;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* skip one or two nodes */
if (!p->skip_eob_node)
@@ -776,10 +774,10 @@
#endif // !CONFIG_ANS
#if CONFIG_VAR_TX
-static void pack_txb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
const TOKENEXTRA *const tok_end, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, int plane,
- BLOCK_SIZE plane_bsize, vpx_bit_depth_t bit_depth,
+ BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
int block, int blk_row, int blk_col,
TX_SIZE tx_size) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -824,16 +822,16 @@
}
#endif
-static void write_segment_id(vp10_writer *w, const struct segmentation *seg,
+static void write_segment_id(aom_writer *w, const struct segmentation *seg,
const struct segmentation_probs *segp,
int segment_id) {
if (seg->enabled && seg->update_map)
- vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
+ av1_write_tree(w, av1_segment_tree, segp->tree_probs, segment_id, 3, 0);
}
// This function encodes the reference frame
-static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- vp10_writer *w) {
+static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int is_compound = has_second_ref(mbmi);
const int segment_id = mbmi->segment_id;
@@ -848,7 +846,7 @@
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- vp10_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
+ aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
} else {
assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
}
@@ -862,47 +860,47 @@
const int bit = mbmi->ref_frame[0] == GOLDEN_FRAME;
#endif // CONFIG_EXT_REFS
- vp10_write(w, bit, vp10_get_pred_prob_comp_ref_p(cm, xd));
+ aom_write(w, bit, av1_get_pred_prob_comp_ref_p(cm, xd));
#if CONFIG_EXT_REFS
if (!bit) {
const int bit1 = mbmi->ref_frame[0] == LAST_FRAME;
- vp10_write(w, bit1, vp10_get_pred_prob_comp_ref_p1(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_comp_ref_p1(cm, xd));
} else {
const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
- vp10_write(w, bit2, vp10_get_pred_prob_comp_ref_p2(cm, xd));
+ aom_write(w, bit2, av1_get_pred_prob_comp_ref_p2(cm, xd));
}
- vp10_write(w, bit_bwd, vp10_get_pred_prob_comp_bwdref_p(cm, xd));
+ aom_write(w, bit_bwd, av1_get_pred_prob_comp_bwdref_p(cm, xd));
#endif // CONFIG_EXT_REFS
} else {
#if CONFIG_EXT_REFS
const int bit0 = (mbmi->ref_frame[0] == ALTREF_FRAME ||
mbmi->ref_frame[0] == BWDREF_FRAME);
- vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+ aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
if (bit0) {
const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
- vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
} else {
const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
mbmi->ref_frame[0] == GOLDEN_FRAME);
- vp10_write(w, bit2, vp10_get_pred_prob_single_ref_p3(cm, xd));
+ aom_write(w, bit2, av1_get_pred_prob_single_ref_p3(cm, xd));
if (!bit2) {
const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
- vp10_write(w, bit3, vp10_get_pred_prob_single_ref_p4(cm, xd));
+ aom_write(w, bit3, av1_get_pred_prob_single_ref_p4(cm, xd));
} else {
const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
- vp10_write(w, bit4, vp10_get_pred_prob_single_ref_p5(cm, xd));
+ aom_write(w, bit4, av1_get_pred_prob_single_ref_p5(cm, xd));
}
}
#else // CONFIG_EXT_REFS
const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
- vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+ aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
if (bit0) {
const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
- vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
}
#endif // CONFIG_EXT_REFS
}
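
write_ref_frames() encodes a single reference as a short decision tree; under CONFIG_EXT_REFS the first bit splits the backward group (BWDREF/ALTREF) from the forward group, and each side resolves with one or two further bits. A standalone reproduction of the tree so the bit pattern per reference is visible (frame ids are local stand-ins for the libaom enums):

#include <stdio.h>

typedef enum { LAST_F, LAST2_F, LAST3_F, GOLDEN_F, BWDREF_F, ALTREF_F } Ref;

static void print_ref_bits(Ref r) {
  /* bit0: backward group (BWDREF/ALTREF) vs forward group. */
  const int bit0 = (r == ALTREF_F || r == BWDREF_F);
  printf("ref=%d: %d", (int)r, bit0);
  if (bit0) {
    printf("%d", r == ALTREF_F);   /* ALTREF vs BWDREF */
  } else {
    /* bit2: {LAST3, GOLDEN} vs {LAST, LAST2}. */
    const int bit2 = (r == LAST3_F || r == GOLDEN_F);
    printf("%d", bit2);
    if (!bit2)
      printf("%d", r != LAST_F);   /* LAST vs LAST2   */
    else
      printf("%d", r != LAST3_F);  /* LAST3 vs GOLDEN */
  }
  printf("\n");
}

int main(void) {
  for (int r = LAST_F; r <= ALTREF_F; ++r) print_ref_bits((Ref)r);
  return 0;
}
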
@@ -910,15 +908,15 @@
}
#if CONFIG_EXT_INTRA
-static void write_ext_intra_mode_info(const VP10_COMMON *const cm,
+static void write_ext_intra_mode_info(const AV1_COMMON *const cm,
const MB_MODE_INFO *const mbmi,
- vp10_writer *w) {
+ aom_writer *w) {
#if !ALLOW_FILTER_INTRA_MODES
return;
#endif
if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
- vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
- cm->fc->ext_intra_probs[0]);
+ aom_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
+ cm->fc->ext_intra_probs[0]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[0];
write_uniform(w, FILTER_INTRA_MODES, mode);
@@ -927,8 +925,8 @@
if (mbmi->uv_mode == DC_PRED &&
mbmi->palette_mode_info.palette_size[1] == 0) {
- vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
- cm->fc->ext_intra_probs[1]);
+ aom_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
+ cm->fc->ext_intra_probs[1]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[1];
write_uniform(w, FILTER_INTRA_MODES, mode);
@@ -936,11 +934,11 @@
}
}
-static void write_intra_angle_info(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- vp10_writer *w) {
+static void write_intra_angle_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+ const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
int p_angle;
if (bsize < BLOCK_8X8) return;
@@ -949,10 +947,10 @@
write_uniform(w, 2 * MAX_ANGLE_DELTAS + 1,
MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle)) {
- vp10_write_token(w, vp10_intra_filter_tree,
- cm->fc->intra_filter_probs[intra_filter_ctx],
- &intra_filter_encodings[mbmi->intra_filter]);
+ if (av1_is_intra_filter_switchable(p_angle)) {
+ av1_write_token(w, av1_intra_filter_tree,
+ cm->fc->intra_filter_probs[intra_filter_ctx],
+ &intra_filter_encodings[mbmi->intra_filter]);
}
}
@@ -963,10 +961,9 @@
}
#endif // CONFIG_EXT_INTRA
-static void write_switchable_interp_filter(VP10_COMP *cpi,
- const MACROBLOCKD *xd,
- vp10_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_switchable_interp_filter(AV1_COMP *cpi, const MACROBLOCKD *xd,
+ aom_writer *w) {
+ AV1_COMMON *const cm = &cpi->common;
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
#if CONFIG_DUAL_FILTER
int dir;
@@ -974,12 +971,12 @@
if (cm->interp_filter == SWITCHABLE) {
#if CONFIG_EXT_INTERP
#if CONFIG_DUAL_FILTER
- if (!vp10_is_interp_needed(xd)) {
+ if (!av1_is_interp_needed(xd)) {
assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
return;
}
#else
- if (!vp10_is_interp_needed(xd)) {
+ if (!av1_is_interp_needed(xd)) {
#if CONFIG_DUAL_FILTER
assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
assert(mbmi->interp_filter[1] == EIGHTTAP_REGULAR);
@@ -995,28 +992,27 @@
if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
(mbmi->ref_frame[1] > INTRA_FRAME &&
has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
- vp10_write_token(
- w, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx],
- &switchable_interp_encodings[mbmi->interp_filter[dir]]);
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
+ av1_write_token(w, av1_switchable_interp_tree,
+ cm->fc->switchable_interp_prob[ctx],
+ &switchable_interp_encodings[mbmi->interp_filter[dir]]);
++cpi->interp_filter_selected[0][mbmi->interp_filter[dir]];
}
}
#else
{
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
- vp10_write_token(w, vp10_switchable_interp_tree,
- cm->fc->switchable_interp_prob[ctx],
- &switchable_interp_encodings[mbmi->interp_filter]);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
+ av1_write_token(w, av1_switchable_interp_tree,
+ cm->fc->switchable_interp_prob[ctx],
+ &switchable_interp_encodings[mbmi->interp_filter]);
++cpi->interp_filter_selected[0][mbmi->interp_filter];
}
#endif
}
}
-static void write_palette_mode_info(const VP10_COMMON *cm,
- const MACROBLOCKD *xd,
- const MODE_INFO *const mi, vp10_writer *w) {
+static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ const MODE_INFO *const mi, aom_writer *w) {
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
@@ -1031,44 +1027,43 @@
palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
if (left_mi)
palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- vp10_write(
- w, n > 0,
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
+ aom_write(w, n > 0,
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
if (n > 0) {
- vp10_write_token(w, vp10_palette_size_tree,
- vp10_default_palette_y_size_prob[bsize - BLOCK_8X8],
- &palette_size_encodings[n - 2]);
+ av1_write_token(w, av1_palette_size_tree,
+ av1_default_palette_y_size_prob[bsize - BLOCK_8X8],
+ &palette_size_encodings[n - 2]);
for (i = 0; i < n; ++i)
- vp10_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
+ aom_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
write_uniform(w, n, pmi->palette_first_color_idx[0]);
}
}
if (mbmi->uv_mode == DC_PRED) {
n = pmi->palette_size[1];
- vp10_write(w, n > 0,
- vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
+ aom_write(w, n > 0,
+ av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
if (n > 0) {
- vp10_write_token(w, vp10_palette_size_tree,
- vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8],
- &palette_size_encodings[n - 2]);
+ av1_write_token(w, av1_palette_size_tree,
+ av1_default_palette_uv_size_prob[bsize - BLOCK_8X8],
+ &palette_size_encodings[n - 2]);
for (i = 0; i < n; ++i) {
- vp10_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
- cm->bit_depth);
- vp10_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
- cm->bit_depth);
+ aom_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
+ cm->bit_depth);
+ aom_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
+ cm->bit_depth);
}
write_uniform(w, n, pmi->palette_first_color_idx[1]);
}
}
}
-static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
+static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
- vp10_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+ aom_writer *w) {
+ AV1_COMMON *const cm = &cpi->common;
#if !CONFIG_REF_MV
const nmv_context *nmvc = &cm->fc->nmvc;
#endif
@@ -1089,8 +1084,8 @@
if (seg->update_map) {
if (seg->temporal_update) {
const int pred_flag = mbmi->seg_id_predicted;
- vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
- vp10_write(w, pred_flag, pred_prob);
+ aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
+ aom_write(w, pred_flag, pred_prob);
if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
} else {
write_segment_id(w, seg, segp, segment_id);
@@ -1110,7 +1105,7 @@
if (!supertx_enabled)
#endif // CONFIG_SUPERTX
if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- vp10_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));
+ aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
#if CONFIG_SUPERTX
@@ -1172,8 +1167,8 @@
mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
else
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, -1);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, -1);
#endif
// If segment skip is not enabled code the mode.
@@ -1213,8 +1208,8 @@
#if CONFIG_EXT_INTER
if (!is_compound)
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, j);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, j);
#endif
#if CONFIG_EXT_INTER
if (is_inter_compound_mode(b_mode))
@@ -1236,53 +1231,53 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
#if CONFIG_EXT_INTER
- &mi->bmi[j].ref_mv[ref].as_mv,
+ &mi->bmi[j].ref_mv[ref].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
#else
#if CONFIG_REF_MV
- &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
+ &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
#else
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
#endif // CONFIG_REF_MV
#endif // CONFIG_EXT_INTER
- nmvc, allow_hp);
+ nmvc, allow_hp);
}
}
#if CONFIG_EXT_INTER
else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
- &mi->bmi[j].ref_mv[1].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
+ &mi->bmi[j].ref_mv[1].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
} else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
- &mi->bmi[j].ref_mv[0].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
+ &mi->bmi[j].ref_mv[0].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
}
#endif // CONFIG_EXT_INTER
}
@@ -1297,52 +1292,52 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
#if CONFIG_EXT_INTER
if (mode == NEWFROMNEARMV)
- vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
else
#endif // CONFIG_EXT_INTER
- vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
}
#if CONFIG_EXT_INTER
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
#endif // CONFIG_EXT_INTER
}
}
@@ -1355,16 +1350,16 @@
is_interintra_allowed(mbmi)) {
const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
const int bsize_group = size_group_lookup[bsize];
- vp10_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
+ aom_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
if (interintra) {
write_interintra_mode(w, mbmi->interintra_mode,
cm->fc->interintra_mode_prob[bsize_group]);
if (is_interintra_wedge_used(bsize)) {
- vp10_write(w, mbmi->use_wedge_interintra,
- cm->fc->wedge_interintra_prob[bsize]);
+ aom_write(w, mbmi->use_wedge_interintra,
+ cm->fc->wedge_interintra_prob[bsize]);
if (mbmi->use_wedge_interintra) {
- vp10_write_literal(w, mbmi->interintra_wedge_index,
- get_wedge_bits_lookup(bsize));
+ aom_write_literal(w, mbmi->interintra_wedge_index,
+ get_wedge_bits_lookup(bsize));
assert(mbmi->interintra_wedge_sign == 0);
}
}
@@ -1384,8 +1379,8 @@
// is not active, and assume SIMPLE_TRANSLATION in the decoder if
// it is active.
assert(mbmi->motion_variation < MOTION_VARIATIONS);
- vp10_write_token(w, vp10_motvar_tree, cm->fc->motvar_prob[bsize],
- &motvar_encodings[mbmi->motion_variation]);
+ av1_write_token(w, av1_motvar_tree, cm->fc->motvar_prob[bsize],
+ &motvar_encodings[mbmi->motion_variation]);
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -1397,12 +1392,12 @@
mbmi->motion_variation != SIMPLE_TRANSLATION) &&
#endif // CONFIG_OBMC
is_interinter_wedge_used(bsize)) {
- vp10_write(w, mbmi->use_wedge_interinter,
- cm->fc->wedge_interinter_prob[bsize]);
+ aom_write(w, mbmi->use_wedge_interinter,
+ cm->fc->wedge_interinter_prob[bsize]);
if (mbmi->use_wedge_interinter) {
- vp10_write_literal(w, mbmi->interinter_wedge_index,
- get_wedge_bits_lookup(bsize));
- vp10_write_bit(w, mbmi->interinter_wedge_sign);
+ aom_write_literal(w, mbmi->interinter_wedge_index,
+ get_wedge_bits_lookup(bsize));
+ aom_write_bit(w, mbmi->interinter_wedge_sign);
}
}
#endif // CONFIG_EXT_INTER
@@ -1424,14 +1419,14 @@
if (is_inter) {
assert(ext_tx_used_inter[eset][mbmi->tx_type]);
if (eset > 0)
- vp10_write_token(
- w, vp10_ext_tx_inter_tree[eset],
+ av1_write_token(
+ w, av1_ext_tx_inter_tree[eset],
cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]],
&ext_tx_inter_encodings[eset][mbmi->tx_type]);
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0)
- vp10_write_token(
- w, vp10_ext_tx_intra_tree[eset],
+ av1_write_token(
+ w, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
&ext_tx_intra_encodings[eset][mbmi->tx_type]);
}
@@ -1443,12 +1438,12 @@
#endif // CONFIG_SUPERTX
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
if (is_inter) {
- vp10_write_token(w, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[mbmi->tx_size],
- &ext_tx_encodings[mbmi->tx_type]);
+ av1_write_token(w, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[mbmi->tx_size],
+ &ext_tx_encodings[mbmi->tx_type]);
} else {
- vp10_write_token(
- w, vp10_ext_tx_tree,
+ av1_write_token(
+ w, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob
[mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]],
&ext_tx_encodings[mbmi->tx_type]);
@@ -1465,8 +1460,8 @@
}
}
-static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- MODE_INFO **mi_8x8, vp10_writer *w) {
+static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO **mi_8x8, aom_writer *w) {
const struct segmentation *const seg = &cm->seg;
const struct segmentation_probs *const segp = &cm->fc->seg;
const MODE_INFO *const mi = mi_8x8[0];
@@ -1518,16 +1513,16 @@
ALLOW_INTRA_EXT_TX) {
int eset = get_ext_tx_set(mbmi->tx_size, bsize, 0);
if (eset > 0)
- vp10_write_token(
- w, vp10_ext_tx_intra_tree[eset],
+ av1_write_token(
+ w, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
&ext_tx_intra_encodings[eset][mbmi->tx_type]);
}
#else
if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- vp10_write_token(
- w, vp10_ext_tx_tree,
+ av1_write_token(
+ w, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size]
[intra_mode_to_tx_type_context[mbmi->mode]],
&ext_tx_encodings[mbmi->tx_type]);
@@ -1546,14 +1541,14 @@
write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col)
#endif // CONFIG_ANS && CONFIG_SUPERTX
-static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
- vp10_writer *w, const TOKENEXTRA **tok,
+static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
+ aom_writer *w, const TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
int mi_row, int mi_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
int plane;
@@ -1584,8 +1579,8 @@
xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
#endif
#if CONFIG_EXT_INTERP
- // vp10_is_interp_needed needs the ref frame buffers set up to look
- // up if they are scaled. vp10_is_interp_needed is in turn needed by
+ // av1_is_interp_needed needs the ref frame buffers set up to look
+ // up if they are scaled. av1_is_interp_needed is in turn needed by
// write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
#endif // CONFIG_EXT_INTERP
@@ -1645,7 +1640,7 @@
MB_MODE_INFO *mbmi = &m->mbmi;
BLOCK_SIZE bsize = mbmi->sb_type;
const BLOCK_SIZE plane_bsize =
- get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+ get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
@@ -1692,31 +1687,31 @@
}
}
-static void write_partition(const VP10_COMMON *const cm,
+static void write_partition(const AV1_COMMON *const cm,
const MACROBLOCKD *const xd, int hbs, int mi_row,
int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
- vp10_writer *w) {
+ aom_writer *w) {
const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
- const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+ const aom_prob *const probs = cm->fc->partition_prob[ctx];
const int has_rows = (mi_row + hbs) < cm->mi_rows;
const int has_cols = (mi_col + hbs) < cm->mi_cols;
if (has_rows && has_cols) {
#if CONFIG_EXT_PARTITION_TYPES
if (bsize <= BLOCK_8X8)
- vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+ av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
else
- vp10_write_token(w, vp10_ext_partition_tree, probs,
- &ext_partition_encodings[p]);
+ av1_write_token(w, av1_ext_partition_tree, probs,
+ &ext_partition_encodings[p]);
#else
- vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+ av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
#endif // CONFIG_EXT_PARTITION_TYPES
} else if (!has_rows && has_cols) {
assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
- vp10_write(w, p == PARTITION_SPLIT, probs[1]);
+ aom_write(w, p == PARTITION_SPLIT, probs[1]);
} else if (has_rows && !has_cols) {
assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
- vp10_write(w, p == PARTITION_SPLIT, probs[2]);
+ aom_write(w, p == PARTITION_SPLIT, probs[2]);
} else {
assert(p == PARTITION_SPLIT);
}
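
The two single-bit branches above are worth calling out: a superblock clipped by the frame edge can only be SPLIT or the one partition direction that fits, so the writer spends at most one bit there. A standalone sketch of that case analysis (the enum and function names are illustrative, not the codec's types):

    #include <assert.h>
    #include <stdio.h>

    typedef enum { PART_NONE, PART_HORZ, PART_VERT, PART_SPLIT } Part;

    /* Returns how many bits the edge cases spend; *bit is set only when
     * exactly one bit is needed. Interior blocks (-1) use the full
     * tree-coded symbol instead. */
    static int partition_bits_at_edge(int has_rows, int has_cols, Part p,
                                      int *bit) {
      if (has_rows && has_cols) return -1;
      if (!has_rows && !has_cols) {
        assert(p == PART_SPLIT);   /* both edges clipped: SPLIT is implied */
        return 0;
      }
      if (!has_rows)
        assert(p == PART_SPLIT || p == PART_HORZ);  /* bottom edge */
      else
        assert(p == PART_SPLIT || p == PART_VERT);  /* right edge */
      *bit = (p == PART_SPLIT);  /* coded against probs[1] or probs[2] above */
      return 1;
    }

    int main(void) {
      int bit = 0;
      int n = partition_bits_at_edge(0, 1, PART_HORZ, &bit);
      printf("bottom edge, HORZ: %d bit(s), bit=%d\n", n, bit);
      return 0;
    }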
@@ -1733,14 +1728,14 @@
write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize)
#endif // CONFIG_ANS && CONFIG_SUPERTX
-static void write_modes_sb(VP10_COMP *const cpi, const TileInfo *const tile,
- vp10_writer *const w, const TOKENEXTRA **tok,
+static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
+ aom_writer *const w, const TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
int mi_row, int mi_col, BLOCK_SIZE bsize) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
@@ -1764,12 +1759,12 @@
if (!supertx_enabled && !frame_is_intra_only(cm) &&
partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
!xd->lossless[0]) {
- vpx_prob prob;
+ aom_prob prob;
supertx_size = max_txsize_lookup[bsize];
prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
[supertx_size];
supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size);
- vp10_write(w, supertx_enabled, prob);
+ aom_write(w, supertx_enabled, prob);
}
#endif // CONFIG_SUPERTX
if (subsize < BLOCK_8X8) {
@@ -1858,16 +1853,16 @@
if (get_ext_tx_types(supertx_size, bsize, 1) > 1 && !skip) {
int eset = get_ext_tx_set(supertx_size, bsize, 1);
if (eset > 0) {
- vp10_write_token(w, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][supertx_size],
- &ext_tx_inter_encodings[eset][mbmi->tx_type]);
+ av1_write_token(w, av1_ext_tx_inter_tree[eset],
+ cm->fc->inter_ext_tx_prob[eset][supertx_size],
+ &ext_tx_inter_encodings[eset][mbmi->tx_type]);
}
}
#else
if (supertx_size < TX_32X32 && !skip) {
- vp10_write_token(w, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[supertx_size],
- &ext_tx_encodings[mbmi->tx_type]);
+ av1_write_token(w, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[supertx_size],
+ &ext_tx_encodings[mbmi->tx_type]);
}
#endif // CONFIG_EXT_TX
@@ -1904,7 +1899,7 @@
#if DERING_REFINEMENT
if (bsize == BLOCK_64X64 && cm->dering_level != 0 &&
!sb_all_skip(cm, mi_row, mi_col)) {
- vpx_write_literal(
+ aom_write_literal(
w,
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain,
DERING_REFINEMENT_BITS);
@@ -1913,10 +1908,10 @@
#endif // CONFIG_EXT_PARTITION_TYPES
}
-static void write_modes(VP10_COMP *const cpi, const TileInfo *const tile,
- vp10_writer *const w, const TOKENEXTRA **tok,
+static void write_modes(AV1_COMP *const cpi, const TileInfo *const tile,
+ aom_writer *const w, const TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
const int mi_row_start = tile->mi_row_start;
const int mi_row_end = tile->mi_row_end;
@@ -1924,10 +1919,10 @@
const int mi_col_end = tile->mi_col_end;
int mi_row, mi_col;
- vp10_zero_above_context(cm, mi_col_start, mi_col_end);
+ av1_zero_above_context(cm, mi_col_start, mi_col_end);
for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += cm->mib_size) {
- vp10_zero_left_context(xd);
+ av1_zero_left_context(xd);
for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += cm->mib_size) {
write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, 0, mi_row, mi_col,
@@ -1936,10 +1931,10 @@
}
}
-static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
- vp10_coeff_stats *coef_branch_ct,
- vp10_coeff_probs_model *coef_probs) {
- vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
+static void build_tree_distribution(AV1_COMP *cpi, TX_SIZE tx_size,
+ av1_coeff_stats *coef_branch_ct,
+ av1_coeff_probs_model *coef_probs) {
+ av1_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
cpi->common.counts.eob_branch[tx_size];
int i, j, k, l, m;
@@ -1948,9 +1943,9 @@
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
- vp10_tree_probs_from_distribution(vp10_coef_tree,
- coef_branch_ct[i][j][k][l],
- coef_counts[i][j][k][l]);
+ av1_tree_probs_from_distribution(av1_coef_tree,
+ coef_branch_ct[i][j][k][l],
+ coef_counts[i][j][k][l]);
coef_branch_ct[i][j][k][l][0][1] =
eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
@@ -1963,12 +1958,12 @@
}
}
-static void update_coef_probs_common(vp10_writer *const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
TX_SIZE tx_size,
- vp10_coeff_stats *frame_branch_ct,
- vp10_coeff_probs_model *new_coef_probs) {
- vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ av1_coeff_stats *frame_branch_ct,
+ av1_coeff_probs_model *new_coef_probs) {
+ av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+ const aom_prob upd = DIFF_UPDATE_PROB;
const int entropy_nodes_update = UNCONSTRAINED_NODES;
int i, j, k, l, t;
int stepsize = cpi->sf.coeff_prob_appx_step;
@@ -1983,22 +1978,22 @@
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ const aom_prob oldp = old_coef_probs[i][j][k][l][t];
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
if (s > 0 && newp != oldp) u = 1;
if (u)
- savings += s - (int)(vp10_cost_zero(upd));
+ savings += s - (int)(av1_cost_zero(upd));
else
- savings -= (int)(vp10_cost_zero(upd));
+ savings -= (int)(av1_cost_zero(upd));
update[u]++;
}
}
@@ -2008,33 +2003,33 @@
/* Is coef updated at all */
if (update[1] == 0 || savings < 0) {
- vp10_write_bit(bc, 0);
+ aom_write_bit(bc, 0);
return;
}
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
// calc probs and branch cts for this frame only
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ const aom_prob upd = DIFF_UPDATE_PROB;
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
if (s > 0 && newp != *oldp) u = 1;
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2054,17 +2049,17 @@
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
// calc probs and branch cts for this frame only
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
int s;
int u = 0;
if (t == PIVOT_NODE) {
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
} else {
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
}
@@ -2077,14 +2072,14 @@
if (u == 1 && updates == 1) {
int v;
// first update
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (v = 0; v < noupdates_before_first; ++v)
- vp10_write(bc, 0, upd);
+ aom_write(bc, 0, upd);
}
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2093,7 +2088,7 @@
}
}
if (updates == 0) {
- vp10_write_bit(bc, 0); // no updates
+ aom_write_bit(bc, 0); // no updates
}
return;
}
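
The structure of update_coef_probs_common is a measure-then-commit pattern: the first nested loop only totals savings in the coder's fractional-bit cost units without touching the bitstream, and the single gating bit is written before any per-node flags. A toy standalone sketch of that control flow, with made-up savings numbers and a flag cost of 1 standing in for av1_cost_zero(upd):

    #include <stdio.h>

    typedef struct { int savings; int would_update; } Trial;

    /* Hypothetical per-node search result (toy numbers, not the real search). */
    static Trial try_update(int node) {
      Trial t;
      t.savings = (node % 3) - 1;   /* -1, 0, or 1 "bits" saved */
      t.would_update = t.savings > 0;
      return t;
    }

    int main(void) {
      int total = 0, updates = 0;
      /* Pass 1: measure only; nothing is written yet. */
      for (int n = 0; n < 12; ++n) {
        Trial t = try_update(n);
        total += t.would_update ? t.savings - 1 : -1;  /* 1 ~ flag cost */
        updates += t.would_update;
      }
      /* Commit: one gating bit decides whether pass 2 writes flags + diffs. */
      if (updates == 0 || total < 0)
        printf("gate bit 0: keep old probabilities\n");
      else
        printf("gate bit 1: re-run search, emit per-node flags and diffs\n");
      return 0;
    }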
@@ -2104,8 +2099,8 @@
#if CONFIG_ENTROPY
// Calculate the token counts between subsequent subframe updates.
static void get_coef_counts_diff(
- VP10_COMP *cpi, int index,
- vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
+ AV1_COMP *cpi, int index,
+ av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
unsigned int eob_counts[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
[COEFF_CONTEXTS]) {
int i, j, k, l, m, tx_size, val;
@@ -2151,11 +2146,11 @@
}
static void update_coef_probs_subframe(
- vp10_writer *const bc, VP10_COMP *cpi, TX_SIZE tx_size,
- vp10_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
- vp10_coeff_probs_model *new_coef_probs) {
- vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ aom_writer *const bc, AV1_COMP *cpi, TX_SIZE tx_size,
+ av1_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
+ av1_coeff_probs_model *new_coef_probs) {
+ av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+ const aom_prob upd = DIFF_UPDATE_PROB;
const int entropy_nodes_update = UNCONSTRAINED_NODES;
int i, j, k, l, t;
int stepsize = cpi->sf.coeff_prob_appx_step;
@@ -2180,22 +2175,22 @@
}
}
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ const aom_prob oldp = old_coef_probs[i][j][k][l][t];
int s, u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_update_search_model_subframe(
+ s = av1_prob_update_search_model_subframe(
this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
stepsize, max_idx);
else
- s = vp10_prob_update_search_subframe(this_branch_ct[t], oldp,
- &newp, upd, max_idx);
+ s = av1_prob_update_search_subframe(this_branch_ct[t], oldp,
+ &newp, upd, max_idx);
if (s > 0 && newp != oldp) u = 1;
if (u)
- savings += s - (int)(vp10_cost_zero(upd));
+ savings += s - (int)(av1_cost_zero(upd));
else
- savings -= (int)(vp10_cost_zero(upd));
+ savings -= (int)(av1_cost_zero(upd));
update[u]++;
}
}
@@ -2205,10 +2200,10 @@
/* Is coef updated at all */
if (update[1] == 0 || savings < 0) {
- vp10_write_bit(bc, 0);
+ aom_write_bit(bc, 0);
return;
}
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
@@ -2221,24 +2216,24 @@
}
}
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ const aom_prob upd = DIFF_UPDATE_PROB;
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_update_search_model_subframe(
+ s = av1_prob_update_search_model_subframe(
this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
stepsize, max_idx);
else
- s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
- &newp, upd, max_idx);
+ s = av1_prob_update_search_subframe(this_branch_ct[t], *oldp,
+ &newp, upd, max_idx);
if (s > 0 && newp != *oldp) u = 1;
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2264,18 +2259,18 @@
}
}
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_update_search_model_subframe(
+ s = av1_prob_update_search_model_subframe(
this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
stepsize, max_idx);
else
- s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
- &newp, upd, max_idx);
+ s = av1_prob_update_search_subframe(this_branch_ct[t], *oldp,
+ &newp, upd, max_idx);
if (s > 0 && newp != *oldp) u = 1;
updates += u;
if (u == 0 && updates == 0) {
@@ -2285,14 +2280,14 @@
if (u == 1 && updates == 1) {
int v;
// first update
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (v = 0; v < noupdates_before_first; ++v)
- vp10_write(bc, 0, upd);
+ aom_write(bc, 0, upd);
}
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2301,7 +2296,7 @@
}
}
if (updates == 0) {
- vp10_write_bit(bc, 0); // no updates
+ aom_write_bit(bc, 0); // no updates
}
return;
}
@@ -2310,7 +2305,7 @@
}
#endif // CONFIG_ENTROPY
-static void update_coef_probs(VP10_COMP *cpi, vp10_writer *w) {
+static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
const TX_MODE tx_mode = cpi->common.tx_mode;
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
@@ -2318,17 +2313,17 @@
int update = 0;
#endif // CONFIG_ANS
#if CONFIG_ENTROPY
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
unsigned int eob_counts_copy[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
[COEFF_CONTEXTS];
int i;
- vp10_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
+ av1_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_copy(cpi->common.fc->coef_probs,
- subframe_stats->enc_starting_coef_probs);
+ av1_copy(cpi->common.fc->coef_probs,
+ subframe_stats->enc_starting_coef_probs);
for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
get_coef_counts_diff(cpi, i, cpi->wholeframe_stats.coef_counts_buf[i],
cpi->wholeframe_stats.eob_counts_buf[i]);
@@ -2337,32 +2332,32 @@
#endif // CONFIG_ENTROPY
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
- vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
- vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+ av1_coeff_stats frame_branch_ct[PLANE_TYPES];
+ av1_coeff_probs_model frame_coef_probs[PLANE_TYPES];
if (cpi->td.counts->tx_size_totals[tx_size] <= 20 ||
(tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
- vp10_write_bit(w, 0);
+ aom_write_bit(w, 0);
} else {
#if CONFIG_ENTROPY
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
unsigned int
eob_counts_copy[PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
- vp10_coeff_count coef_counts_copy[PLANE_TYPES];
- vp10_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
- vp10_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
+ av1_coeff_count coef_counts_copy[PLANE_TYPES];
+ av1_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
+ av1_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
build_tree_distribution(cpi, tx_size, frame_branch_ct,
frame_coef_probs);
for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
- vp10_copy(cpi->common.counts.eob_branch[tx_size],
- cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
- vp10_copy(cpi->td.rd_counts.coef_counts[tx_size],
- cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
+ av1_copy(cpi->common.counts.eob_branch[tx_size],
+ cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
+ av1_copy(cpi->td.rd_counts.coef_counts[tx_size],
+ cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
build_tree_distribution(cpi, tx_size, cpi->branch_ct_buf[i][tx_size],
dummy_frame_coef_probs);
}
- vp10_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
- vp10_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
+ av1_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
+ av1_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
update_coef_probs_subframe(w, cpi, tx_size, cpi->branch_ct_buf,
frame_coef_probs);
@@ -2385,65 +2380,65 @@
}
#if CONFIG_ENTROPY
- vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
- vp10_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
+ av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_copy(eob_counts_copy, cm->counts.eob_branch);
+ av1_copy(eob_counts_copy, cm->counts.eob_branch);
for (i = 1; i <= cpi->common.coef_probs_update_idx; ++i) {
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
- vp10_full_to_model_counts(cm->counts.coef[tx_size],
- subframe_stats->coef_counts_buf[i][tx_size]);
- vp10_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
- vp10_partial_adapt_probs(cm, 0, 0);
- vp10_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
+ av1_full_to_model_counts(cm->counts.coef[tx_size],
+ subframe_stats->coef_counts_buf[i][tx_size]);
+ av1_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
+ av1_partial_adapt_probs(cm, 0, 0);
+ av1_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
}
- vp10_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
- vp10_copy(cm->counts.eob_branch, eob_counts_copy);
+ av1_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
+ av1_copy(cm->counts.eob_branch, eob_counts_copy);
}
#endif // CONFIG_ENTROPY
#if CONFIG_ANS
- if (update) vp10_coef_pareto_cdfs(cpi->common.fc);
+ if (update) av1_coef_pareto_cdfs(cpi->common.fc);
#endif // CONFIG_ANS
}
#if CONFIG_LOOP_RESTORATION
-static void encode_restoration(VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
+static void encode_restoration(AV1_COMMON *cm,
+ struct aom_write_bit_buffer *wb) {
int i;
RestorationInfo *rst = &cm->rst_info;
- vpx_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
+ aom_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
if (rst->restoration_type != RESTORE_NONE) {
if (rst->restoration_type == RESTORE_BILATERAL) {
- vpx_wb_write_bit(wb, 1);
+ aom_wb_write_bit(wb, 1);
for (i = 0; i < cm->rst_internal.ntiles; ++i) {
if (rst->bilateral_level[i] >= 0) {
- vpx_wb_write_bit(wb, 1);
- vpx_wb_write_literal(wb, rst->bilateral_level[i],
- vp10_bilateral_level_bits(cm));
+ aom_wb_write_bit(wb, 1);
+ aom_wb_write_literal(wb, rst->bilateral_level[i],
+ av1_bilateral_level_bits(cm));
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
}
}
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
for (i = 0; i < cm->rst_internal.ntiles; ++i) {
if (rst->wiener_level[i]) {
- vpx_wb_write_bit(wb, 1);
- vpx_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
+ aom_wb_write_bit(wb, 1);
+ aom_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
WIENER_FILT_TAP0_BITS);
- vpx_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
+ aom_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
WIENER_FILT_TAP1_BITS);
- vpx_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
+ aom_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
WIENER_FILT_TAP2_BITS);
- vpx_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
+ aom_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
WIENER_FILT_TAP0_BITS);
- vpx_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
+ aom_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
WIENER_FILT_TAP1_BITS);
- vpx_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
+ aom_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
WIENER_FILT_TAP2_BITS);
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
}
}
}
@@ -2451,39 +2446,38 @@
}
#endif // CONFIG_LOOP_RESTORATION
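
Each Wiener tap above is transmitted as an unsigned literal offset from its minimum legal value, so the field width only needs to cover the tap's range. A standalone sketch of that offset-literal convention (TAP_MINV and TAP_BITS are placeholders, not the codec's WIENER_FILT_* constants):

    #include <stdio.h>

    #define TAP_MINV (-5)  /* placeholder minimum tap value */
    #define TAP_BITS 4     /* placeholder field width */

    static unsigned encode_tap(int tap) { return (unsigned)(tap - TAP_MINV); }
    static int decode_tap(unsigned field) { return (int)field + TAP_MINV; }

    int main(void) {
      for (int tap = -5; tap <= 3; tap += 4)
        printf("tap %2d -> %u (%d-bit field) -> %2d\n", tap, encode_tap(tap),
               TAP_BITS, decode_tap(encode_tap(tap)));
      return 0;
    }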
-static void encode_loopfilter(VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
+static void encode_loopfilter(AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
int i;
struct loopfilter *lf = &cm->lf;
// Encode the loop filter level and type
- vpx_wb_write_literal(wb, lf->filter_level, 6);
- vpx_wb_write_literal(wb, lf->sharpness_level, 3);
+ aom_wb_write_literal(wb, lf->filter_level, 6);
+ aom_wb_write_literal(wb, lf->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or
// ref frame (if they are enabled).
- vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);
+ aom_wb_write_bit(wb, lf->mode_ref_delta_enabled);
if (lf->mode_ref_delta_enabled) {
- vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
+ aom_wb_write_bit(wb, lf->mode_ref_delta_update);
if (lf->mode_ref_delta_update) {
for (i = 0; i < TOTAL_REFS_PER_FRAME; i++) {
const int delta = lf->ref_deltas[i];
const int changed = delta != lf->last_ref_deltas[i];
- vpx_wb_write_bit(wb, changed);
+ aom_wb_write_bit(wb, changed);
if (changed) {
lf->last_ref_deltas[i] = delta;
- vpx_wb_write_inv_signed_literal(wb, delta, 6);
+ aom_wb_write_inv_signed_literal(wb, delta, 6);
}
}
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
const int delta = lf->mode_deltas[i];
const int changed = delta != lf->last_mode_deltas[i];
- vpx_wb_write_bit(wb, changed);
+ aom_wb_write_bit(wb, changed);
if (changed) {
lf->last_mode_deltas[i] = delta;
- vpx_wb_write_inv_signed_literal(wb, delta, 6);
+ aom_wb_write_inv_signed_literal(wb, delta, 6);
}
}
}
@@ -2491,84 +2485,83 @@
}
#if CONFIG_CLPF
-static void encode_clpf(const VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, cm->clpf, 1);
+static void encode_clpf(const AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, cm->clpf, 1);
}
#endif
#if CONFIG_DERING
-static void encode_dering(int level, struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, level, DERING_LEVEL_BITS);
+static void encode_dering(int level, struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, level, DERING_LEVEL_BITS);
}
#endif // CONFIG_DERING
-static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
+static void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) {
if (delta_q != 0) {
- vpx_wb_write_bit(wb, 1);
- vpx_wb_write_inv_signed_literal(wb, delta_q, 6);
+ aom_wb_write_bit(wb, 1);
+ aom_wb_write_inv_signed_literal(wb, delta_q, 6);
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
}
}
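
The delta-Q layout written by write_delta_q is compact: a presence bit, and only when it is set, a sign-magnitude payload. A standalone sketch of the assumed field order (sign last, inferred from the inv-signed-literal writer; put_bits is a stand-in, not library API):

    #include <stdio.h>
    #include <stdlib.h>

    static void put_bits(unsigned v, int n) {
      for (int i = n - 1; i >= 0; --i) printf("%u", (v >> i) & 1);
    }

    static void write_delta_q_sketch(int delta_q) {
      if (delta_q != 0) {
        put_bits(1, 1);                       /* delta present */
        put_bits((unsigned)abs(delta_q), 6);  /* 6-bit magnitude */
        put_bits(delta_q < 0, 1);             /* sign-magnitude, sign last */
      } else {
        put_bits(0, 1);                       /* delta_q == 0: one bit total */
      }
      printf("  (delta_q=%d)\n", delta_q);
    }

    int main(void) {
      write_delta_q_sketch(0);
      write_delta_q_sketch(5);
      write_delta_q_sketch(-5);
      return 0;
    }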
-static void encode_quantization(const VP10_COMMON *const cm,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
+static void encode_quantization(const AV1_COMMON *const cm,
+ struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
write_delta_q(wb, cm->y_dc_delta_q);
write_delta_q(wb, cm->uv_dc_delta_q);
write_delta_q(wb, cm->uv_ac_delta_q);
#if CONFIG_AOM_QM
- vpx_wb_write_bit(wb, cm->using_qmatrix);
+ aom_wb_write_bit(wb, cm->using_qmatrix);
if (cm->using_qmatrix) {
- vpx_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
- vpx_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
+ aom_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
+ aom_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
}
#endif
}
-static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
- struct vpx_write_bit_buffer *wb) {
+static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
+ struct aom_write_bit_buffer *wb) {
int i, j;
const struct segmentation *seg = &cm->seg;
- vpx_wb_write_bit(wb, seg->enabled);
+ aom_wb_write_bit(wb, seg->enabled);
if (!seg->enabled) return;
// Segmentation map
if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
- vpx_wb_write_bit(wb, seg->update_map);
+ aom_wb_write_bit(wb, seg->update_map);
} else {
assert(seg->update_map == 1);
}
if (seg->update_map) {
// Select the coding strategy (temporal or spatial)
- vp10_choose_segmap_coding_method(cm, xd);
+ av1_choose_segmap_coding_method(cm, xd);
// Write out the chosen coding method.
if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
- vpx_wb_write_bit(wb, seg->temporal_update);
+ aom_wb_write_bit(wb, seg->temporal_update);
} else {
assert(seg->temporal_update == 0);
}
}
// Segmentation data
- vpx_wb_write_bit(wb, seg->update_data);
+ aom_wb_write_bit(wb, seg->update_data);
if (seg->update_data) {
- vpx_wb_write_bit(wb, seg->abs_delta);
+ aom_wb_write_bit(wb, seg->abs_delta);
for (i = 0; i < MAX_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
const int active = segfeature_active(seg, i, j);
- vpx_wb_write_bit(wb, active);
+ aom_wb_write_bit(wb, active);
if (active) {
const int data = get_segdata(seg, i, j);
- const int data_max = vp10_seg_feature_data_max(j);
+ const int data_max = av1_seg_feature_data_max(j);
- if (vp10_is_segfeature_signed(j)) {
+ if (av1_is_segfeature_signed(j)) {
encode_unsigned_max(wb, abs(data), data_max);
- vpx_wb_write_bit(wb, data < 0);
+ aom_wb_write_bit(wb, data < 0);
} else {
encode_unsigned_max(wb, data, data_max);
}
@@ -2578,8 +2571,8 @@
}
}
-static void update_seg_probs(VP10_COMP *cpi, vp10_writer *w) {
- VP10_COMMON *cm = &cpi->common;
+static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
+ AV1_COMMON *cm = &cpi->common;
if (!cm->seg.enabled || !cm->seg.update_map) return;
@@ -2587,41 +2580,41 @@
int i;
for (i = 0; i < PREDICTION_PROBS; i++)
- vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
- cm->counts.seg.pred[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
+ cm->counts.seg.pred[i]);
- prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+ prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
} else {
- prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+ prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
cm->counts.seg.tree_total, MAX_SEGMENTS, w);
}
}
-static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
- if (mode != TX_MODE_SELECT) vpx_wb_write_literal(wb, mode, 2);
+static void write_txfm_mode(TX_MODE mode, struct aom_write_bit_buffer *wb) {
+ aom_wb_write_bit(wb, mode == TX_MODE_SELECT);
+ if (mode != TX_MODE_SELECT) aom_wb_write_literal(wb, mode, 2);
}
-static void update_txfm_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_txfm_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
if (cm->tx_mode == TX_MODE_SELECT) {
int i, j;
for (i = 0; i < TX_SIZES - 1; ++i)
for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
- prob_diff_update(vp10_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
+ prob_diff_update(av1_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
counts->tx_size[i][j], i + 2, w);
}
}
static void write_interp_filter(INTERP_FILTER filter,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_bit(wb, filter == SWITCHABLE);
+ struct aom_write_bit_buffer *wb) {
+ aom_wb_write_bit(wb, filter == SWITCHABLE);
if (filter != SWITCHABLE)
- vpx_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
+ aom_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
}
-static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
if (cm->interp_filter == SWITCHABLE) {
// Check to see if only one of the filters is actually used
int count[SWITCHABLE_FILTERS];
@@ -2644,8 +2637,8 @@
}
}
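
fix_interp_filter's body is mostly elided by this hunk; its intent is that when the frame's counts show exactly one switchable filter actually in use, cm->interp_filter is collapsed from SWITCHABLE to that filter, saving the per-block filter bits. A standalone sketch under that assumption (the constant 3 for SWITCHABLE_FILTERS and the sentinel value are illustrative):

    #include <stdio.h>

    #define SWITCHABLE 3  /* assumed sentinel, as in the codec */

    static int fix_interp_filter_sketch(const int count[3]) {
      int used = -1, n_used = 0;
      for (int f = 0; f < 3; ++f)
        if (count[f]) { used = f; ++n_used; }
      return (n_used == 1) ? used : SWITCHABLE;  /* collapse only if unique */
    }

    int main(void) {
      const int counts[3] = { 0, 42, 0 };
      printf("effective filter: %d\n", fix_interp_filter_sketch(counts));
      return 0;
    }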
-static void write_tile_info(const VP10_COMMON *const cm,
- struct vpx_write_bit_buffer *wb) {
+static void write_tile_info(const AV1_COMMON *const cm,
+ struct aom_write_bit_buffer *wb) {
#if CONFIG_EXT_TILE
const int tile_width =
ALIGN_POWER_OF_TWO(cm->tile_width, cm->mib_size_log2) >>
@@ -2662,33 +2655,33 @@
if (cm->sb_size == BLOCK_128X128) {
assert(tile_width <= 32);
assert(tile_height <= 32);
- vpx_wb_write_literal(wb, tile_width - 1, 5);
- vpx_wb_write_literal(wb, tile_height - 1, 5);
+ aom_wb_write_literal(wb, tile_width - 1, 5);
+ aom_wb_write_literal(wb, tile_height - 1, 5);
} else
#endif // CONFIG_EXT_PARTITION
{
assert(tile_width <= 64);
assert(tile_height <= 64);
- vpx_wb_write_literal(wb, tile_width - 1, 6);
- vpx_wb_write_literal(wb, tile_height - 1, 6);
+ aom_wb_write_literal(wb, tile_width - 1, 6);
+ aom_wb_write_literal(wb, tile_height - 1, 6);
}
#else
int min_log2_tile_cols, max_log2_tile_cols, ones;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
// columns
ones = cm->log2_tile_cols - min_log2_tile_cols;
- while (ones--) vpx_wb_write_bit(wb, 1);
+ while (ones--) aom_wb_write_bit(wb, 1);
- if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
+ if (cm->log2_tile_cols < max_log2_tile_cols) aom_wb_write_bit(wb, 0);
// rows
- vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
- if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+ aom_wb_write_bit(wb, cm->log2_tile_rows != 0);
+ if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
#endif // CONFIG_EXT_TILE
}
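
In the non-CONFIG_EXT_TILE branch above, the column count is sent as a unary offset from min_log2_tile_cols (one 1 bit per extra doubling, 0-terminated unless already at the maximum) and the row count as at most two bits. A standalone sketch of both fields (function names illustrative; put_bit is a stand-in writer):

    #include <stdio.h>

    static void put_bit(int b) { printf("%d", b); }

    static void write_tile_cols_sketch(int log2_cols, int min_log2,
                                       int max_log2) {
      int ones = log2_cols - min_log2;
      while (ones--) put_bit(1);             /* unary: 1 per extra doubling */
      if (log2_cols < max_log2) put_bit(0);  /* terminator unless at the max */
    }

    static void write_tile_rows_sketch(int log2_rows) {
      put_bit(log2_rows != 0);
      if (log2_rows != 0) put_bit(log2_rows != 1);  /* {0,1,2} in <= 2 bits */
    }

    int main(void) {
      write_tile_cols_sketch(2, 0, 6); printf(" <- log2_tile_cols = 2\n");
      write_tile_rows_sketch(1);       printf(" <- log2_tile_rows = 1\n");
      return 0;
    }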
-static int get_refresh_mask(VP10_COMP *cpi) {
+static int get_refresh_mask(AV1_COMP *cpi) {
int refresh_mask = 0;
#if CONFIG_EXT_REFS
@@ -2712,12 +2705,12 @@
refresh_mask |= (cpi->refresh_last_frame << cpi->lst_fb_idx);
#endif // CONFIG_EXT_REFS
- if (vp10_preserve_existing_gf(cpi)) {
+ if (av1_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term we leave it in the GF slot and,
// if we're updating the GF with the current decoded frame, we save it
// instead to the ARF slot.
- // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
+ // Later, in the function av1_encoder.c:av1_update_reference_frames() we
// will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
// there so that it can be done outside of the recode loop.
// Note: This is highly specific to the use of ARF as a forward reference,
@@ -2793,14 +2786,14 @@
}
#endif // CONFIG_EXT_TILE
-static uint32_t write_tiles(VP10_COMP *const cpi, uint8_t *const dst,
+static uint32_t write_tiles(AV1_COMP *const cpi, uint8_t *const dst,
unsigned int *max_tile_size,
unsigned int *max_tile_col_size) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
#if CONFIG_ANS
struct AnsCoder token_ans;
#else
- vp10_writer mode_bc;
+ aom_writer mode_bc;
#endif // CONFIG_ANS
int tile_row, tile_col;
TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
@@ -2827,7 +2820,7 @@
const int is_last_col = (tile_col == tile_cols - 1);
const size_t col_offset = total_size;
- vp10_tile_set_col(&tile_info, cm, tile_col);
+ av1_tile_set_col(&tile_info, cm, tile_col);
// The last column does not have a column header
if (!is_last_col) total_size += 4;
@@ -2839,7 +2832,7 @@
const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
const int data_offset = have_tiles ? 4 : 0;
- vp10_tile_set_row(&tile_info, cm, tile_row);
+ av1_tile_set_row(&tile_info, cm, tile_row);
buf->data = dst + total_size;
@@ -2847,10 +2840,10 @@
// even for the last one, unless no tiling is used at all.
total_size += data_offset;
#if !CONFIG_ANS
- vpx_start_encode(&mode_bc, buf->data + data_offset);
+ aom_start_encode(&mode_bc, buf->data + data_offset);
write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
assert(tok == tok_end);
- vpx_stop_encode(&mode_bc);
+ aom_stop_encode(&mode_bc);
tile_size = mode_bc.pos;
#else
buf_ans_write_reset(buf_ans);
@@ -2864,7 +2857,7 @@
buf->size = tile_size;
// Record the maximum tile size we see, so we can compact headers later.
- *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+ *max_tile_size = AOMMAX(*max_tile_size, tile_size);
if (have_tiles) {
// tile header: size of this tile, or copy offset
@@ -2896,7 +2889,7 @@
// If it is not final packing, record the maximum tile column size we see,
// otherwise, check if the tile size is out of the range.
- *max_tile_col_size = VPXMAX(*max_tile_col_size, col_size);
+ *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
}
}
#else
@@ -2904,7 +2897,7 @@
TileInfo tile_info;
const int is_last_row = (tile_row == tile_rows - 1);
- vp10_tile_set_row(&tile_info, cm, tile_row);
+ av1_tile_set_row(&tile_info, cm, tile_row);
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
@@ -2914,7 +2907,7 @@
const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
- vp10_tile_set_col(&tile_info, cm, tile_col);
+ av1_tile_set_col(&tile_info, cm, tile_col);
buf->data = dst + total_size;
@@ -2922,10 +2915,10 @@
if (!is_last_tile) total_size += 4;
#if !CONFIG_ANS
- vpx_start_encode(&mode_bc, dst + total_size);
+ aom_start_encode(&mode_bc, dst + total_size);
write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
assert(tok == tok_end);
- vpx_stop_encode(&mode_bc);
+ aom_stop_encode(&mode_bc);
tile_size = mode_bc.pos;
#else
buf_ans_write_reset(buf_ans);
@@ -2941,7 +2934,7 @@
buf->size = tile_size;
if (!is_last_tile) {
- *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+ *max_tile_size = AOMMAX(*max_tile_size, tile_size);
// size of this tile
mem_put_le32(buf->data, tile_size);
}
@@ -2953,28 +2946,28 @@
return (uint32_t)total_size;
}
-static void write_render_size(const VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
+static void write_render_size(const AV1_COMMON *cm,
+ struct aom_write_bit_buffer *wb) {
const int scaling_active =
cm->width != cm->render_width || cm->height != cm->render_height;
- vpx_wb_write_bit(wb, scaling_active);
+ aom_wb_write_bit(wb, scaling_active);
if (scaling_active) {
- vpx_wb_write_literal(wb, cm->render_width - 1, 16);
- vpx_wb_write_literal(wb, cm->render_height - 1, 16);
+ aom_wb_write_literal(wb, cm->render_width - 1, 16);
+ aom_wb_write_literal(wb, cm->render_height - 1, 16);
}
}
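
write_render_size and write_frame_size both use the (value - 1) literal convention, so a 16-bit field covers dimensions 1..65536 and a zero-sized frame is unrepresentable. A trivial standalone sketch of that round trip:

    #include <assert.h>
    #include <stdio.h>

    static unsigned enc_dim(unsigned v) {
      assert(v >= 1 && v <= 65536);  /* representable range of the field */
      return v - 1;
    }
    static unsigned dec_dim(unsigned field) { return field + 1; }

    int main(void) {
      unsigned w = 1920, h = 1080;
      printf("width  field %u -> %u\n", enc_dim(w), dec_dim(enc_dim(w)));
      printf("height field %u -> %u\n", enc_dim(h), dec_dim(enc_dim(h)));
      return 0;
    }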
-static void write_frame_size(const VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, cm->width - 1, 16);
- vpx_wb_write_literal(wb, cm->height - 1, 16);
+static void write_frame_size(const AV1_COMMON *cm,
+ struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, cm->width - 1, 16);
+ aom_wb_write_literal(wb, cm->height - 1, 16);
write_render_size(cm, wb);
}
-static void write_frame_size_with_refs(VP10_COMP *cpi,
- struct vpx_write_bit_buffer *wb) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_frame_size_with_refs(AV1_COMP *cpi,
+ struct aom_write_bit_buffer *wb) {
+ AV1_COMMON *const cm = &cpi->common;
int found = 0;
MV_REFERENCE_FRAME ref_frame;
@@ -2987,66 +2980,66 @@
found &= cm->render_width == cfg->render_width &&
cm->render_height == cfg->render_height;
}
- vpx_wb_write_bit(wb, found);
+ aom_wb_write_bit(wb, found);
if (found) {
break;
}
}
if (!found) {
- vpx_wb_write_literal(wb, cm->width - 1, 16);
- vpx_wb_write_literal(wb, cm->height - 1, 16);
+ aom_wb_write_literal(wb, cm->width - 1, 16);
+ aom_wb_write_literal(wb, cm->height - 1, 16);
write_render_size(cm, wb);
}
}
-static void write_sync_code(struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
- vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
- vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
+static void write_sync_code(struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_0, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_1, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_2, 8);
}
static void write_profile(BITSTREAM_PROFILE profile,
- struct vpx_write_bit_buffer *wb) {
+ struct aom_write_bit_buffer *wb) {
switch (profile) {
- case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
- case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
- case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
- case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+ case PROFILE_0: aom_wb_write_literal(wb, 0, 2); break;
+ case PROFILE_1: aom_wb_write_literal(wb, 2, 2); break;
+ case PROFILE_2: aom_wb_write_literal(wb, 1, 2); break;
+ case PROFILE_3: aom_wb_write_literal(wb, 6, 3); break;
default: assert(0);
}
}
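
The literal values above look shuffled (PROFILE_1 writes 2, PROFILE_2 writes 1) because aom_wb_write_literal emits bits MSB-first while the matching reader assembles the profile LSB-first, vp9-style. A standalone sketch of the assumed reader side, showing why the swap comes out right (read_bit and the bit arrays are illustrative):

    #include <stdio.h>

    static const int *stream;
    static int pos;
    static int read_bit(void) { return stream[pos++]; }

    static int read_profile_sketch(void) {
      int profile = read_bit() | (read_bit() << 1);  /* first bit is the LSB */
      if (profile > 2) profile += read_bit();        /* 3-bit escape: PROFILE_3 */
      return profile;
    }

    int main(void) {
      static const int p1[] = { 1, 0 };     /* literal 2, emitted MSB-first */
      static const int p3[] = { 1, 1, 0 };  /* literal 6 in 3 bits */
      stream = p1; pos = 0; printf("profile %d\n", read_profile_sketch());
      stream = p3; pos = 0; printf("profile %d\n", read_profile_sketch());
      return 0;
    }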
static void write_bitdepth_colorspace_sampling(
- VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
+ AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
if (cm->profile >= PROFILE_2) {
- assert(cm->bit_depth > VPX_BITS_8);
- vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
+ assert(cm->bit_depth > AOM_BITS_8);
+ aom_wb_write_bit(wb, cm->bit_depth == AOM_BITS_10 ? 0 : 1);
}
- vpx_wb_write_literal(wb, cm->color_space, 3);
- if (cm->color_space != VPX_CS_SRGB) {
+ aom_wb_write_literal(wb, cm->color_space, 3);
+ if (cm->color_space != AOM_CS_SRGB) {
// 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
- vpx_wb_write_bit(wb, cm->color_range);
+ aom_wb_write_bit(wb, cm->color_range);
if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
- vpx_wb_write_bit(wb, cm->subsampling_x);
- vpx_wb_write_bit(wb, cm->subsampling_y);
- vpx_wb_write_bit(wb, 0); // unused
+ aom_wb_write_bit(wb, cm->subsampling_x);
+ aom_wb_write_bit(wb, cm->subsampling_y);
+ aom_wb_write_bit(wb, 0); // unused
} else {
assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
}
} else {
assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
- vpx_wb_write_bit(wb, 0); // unused
+ aom_wb_write_bit(wb, 0); // unused
}
}
-static void write_uncompressed_header(VP10_COMP *cpi,
- struct vpx_write_bit_buffer *wb) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_uncompressed_header(AV1_COMP *cpi,
+ struct aom_write_bit_buffer *wb) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- vpx_wb_write_literal(wb, VPX_FRAME_MARKER, 2);
+ aom_wb_write_literal(wb, AOM_FRAME_MARKER, 2);
write_profile(cm->profile, wb);
@@ -3059,45 +3052,45 @@
const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Buffer %d does not contain a reconstructed frame",
frame_to_show);
}
ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
- vpx_wb_write_bit(wb, 1); // show_existing_frame
- vpx_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
+ aom_wb_write_bit(wb, 1); // show_existing_frame
+ aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
return;
} else {
#endif // CONFIG_EXT_REFS
- vpx_wb_write_bit(wb, 0); // show_existing_frame
+ aom_wb_write_bit(wb, 0); // show_existing_frame
#if CONFIG_EXT_REFS
}
#endif // CONFIG_EXT_REFS
- vpx_wb_write_bit(wb, cm->frame_type);
- vpx_wb_write_bit(wb, cm->show_frame);
- vpx_wb_write_bit(wb, cm->error_resilient_mode);
+ aom_wb_write_bit(wb, cm->frame_type);
+ aom_wb_write_bit(wb, cm->show_frame);
+ aom_wb_write_bit(wb, cm->error_resilient_mode);
if (cm->frame_type == KEY_FRAME) {
write_sync_code(wb);
write_bitdepth_colorspace_sampling(cm, wb);
write_frame_size(cm, wb);
if (frame_is_intra_only(cm))
- vpx_wb_write_bit(wb, cm->allow_screen_content_tools);
+ aom_wb_write_bit(wb, cm->allow_screen_content_tools);
} else {
- if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
+ if (!cm->show_frame) aom_wb_write_bit(wb, cm->intra_only);
if (!cm->error_resilient_mode) {
if (cm->intra_only) {
- vpx_wb_write_bit(wb,
+ aom_wb_write_bit(wb,
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
} else {
- vpx_wb_write_bit(wb,
+ aom_wb_write_bit(wb,
cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
- vpx_wb_write_bit(wb,
+ aom_wb_write_bit(wb,
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
}
}
@@ -3111,18 +3104,18 @@
write_bitdepth_colorspace_sampling(cm, wb);
#if CONFIG_EXT_REFS
- vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+ aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
#else
- vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+ aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
#endif // CONFIG_EXT_REFS
write_frame_size(cm, wb);
} else {
MV_REFERENCE_FRAME ref_frame;
#if CONFIG_EXT_REFS
- vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+ aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
#else
- vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+ aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
@@ -3135,14 +3128,14 @@
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
- vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
+ aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
REF_FRAMES_LOG2);
- vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
+ aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
}
write_frame_size_with_refs(cpi, wb);
- vpx_wb_write_bit(wb, cm->allow_high_precision_mv);
+ aom_wb_write_bit(wb, cm->allow_high_precision_mv);
fix_interp_filter(cm, cpi->td.counts);
write_interp_filter(cm->interp_filter, wb);
@@ -3150,17 +3143,17 @@
}
if (!cm->error_resilient_mode) {
- vpx_wb_write_bit(
+ aom_wb_write_bit(
wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
}
- vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
+ aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
assert(cm->mib_size == num_8x8_blocks_wide_lookup[cm->sb_size]);
assert(cm->mib_size == 1 << cm->mib_size_log2);
#if CONFIG_EXT_PARTITION
assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
- vpx_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
+ aom_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
#else
assert(cm->sb_size == BLOCK_64X64);
#endif // CONFIG_EXT_PARTITION
@@ -3186,8 +3179,8 @@
const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
- vpx_wb_write_bit(wb, use_hybrid_pred);
- if (!use_hybrid_pred) vpx_wb_write_bit(wb, use_compound_pred);
+ aom_wb_write_bit(wb, use_hybrid_pred);
+ if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
}
write_tile_info(cm, wb);
@@ -3195,35 +3188,35 @@
#if CONFIG_GLOBAL_MOTION
static void write_global_motion_params(Global_Motion_Params *params,
- vpx_prob *probs, vp10_writer *w) {
+ aom_prob *probs, aom_writer *w) {
GLOBAL_MOTION_TYPE gmtype = get_gmtype(params);
- vp10_write_token(w, vp10_global_motion_types_tree, probs,
- &global_motion_types_encodings[gmtype]);
+ av1_write_token(w, av1_global_motion_types_tree, probs,
+ &global_motion_types_encodings[gmtype]);
switch (gmtype) {
case GLOBAL_ZERO: break;
case GLOBAL_AFFINE:
- vp10_write_primitive_symmetric(
+ av1_write_primitive_symmetric(
w, params->motion_params.wmmat[4] >> GM_ALPHA_PREC_DIFF,
GM_ABS_ALPHA_BITS);
- vp10_write_primitive_symmetric(
+ av1_write_primitive_symmetric(
w, (params->motion_params.wmmat[5] >> GM_ALPHA_PREC_DIFF) -
(1 << GM_ALPHA_PREC_BITS),
GM_ABS_ALPHA_BITS);
// fallthrough intended
case GLOBAL_ROTZOOM:
- vp10_write_primitive_symmetric(
+ av1_write_primitive_symmetric(
w, (params->motion_params.wmmat[2] >> GM_ALPHA_PREC_DIFF) -
(1 << GM_ALPHA_PREC_BITS),
GM_ABS_ALPHA_BITS);
- vp10_write_primitive_symmetric(
+ aom_write_primitive_symmetric(
w, params->motion_params.wmmat[3] >> GM_ALPHA_PREC_DIFF,
GM_ABS_ALPHA_BITS);
// fallthrough intended
case GLOBAL_TRANSLATION:
- vp10_write_primitive_symmetric(
+ aom_write_primitive_symmetric(
w, params->motion_params.wmmat[0] >> GM_TRANS_PREC_DIFF,
GM_ABS_TRANS_BITS);
- vp10_write_primitive_symmetric(
+ aom_write_primitive_symmetric(
w, params->motion_params.wmmat[1] >> GM_TRANS_PREC_DIFF,
GM_ABS_TRANS_BITS);
break;
@@ -3231,8 +3224,8 @@
}
}
-static void write_global_motion(VP10_COMP *cpi, vp10_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_global_motion(AV1_COMP *cpi, aom_writer *w) {
+ AV1_COMMON *const cm = &cpi->common;
int frame;
for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
if (!cpi->global_motion_used[frame]) {
@@ -3244,14 +3237,14 @@
}
#endif
-static uint32_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
- VP10_COMMON *const cm = &cpi->common;
+static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
+ AV1_COMMON *const cm = &cpi->common;
#if CONFIG_SUPERTX
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
#endif // CONFIG_SUPERTX
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = cpi->td.counts;
- vp10_writer *header_bc;
+ aom_writer *header_bc;
int i, j;
#if CONFIG_ANS
@@ -3260,9 +3253,9 @@
header_bc = &cpi->buf_ans;
buf_ans_write_reset(header_bc);
#else
- vp10_writer real_header_bc;
+ aom_writer real_header_bc;
header_bc = &real_header_bc;
- vpx_start_encode(header_bc, data);
+ aom_start_encode(header_bc, data);
#endif
update_txfm_probs(cm, header_bc, counts);
update_coef_probs(cpi, header_bc);
@@ -3275,39 +3268,39 @@
update_seg_probs(cpi, header_bc);
for (i = 0; i < INTRA_MODES; ++i)
- prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
+ prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
counts->uv_mode[i], INTRA_MODES, header_bc);
#if CONFIG_EXT_PARTITION_TYPES
- prob_diff_update(vp10_partition_tree, fc->partition_prob[0],
+ prob_diff_update(av1_partition_tree, fc->partition_prob[0],
counts->partition[0], PARTITION_TYPES, header_bc);
for (i = 1; i < PARTITION_CONTEXTS; ++i)
- prob_diff_update(vp10_ext_partition_tree, fc->partition_prob[i],
+ prob_diff_update(av1_ext_partition_tree, fc->partition_prob[i],
counts->partition[i], EXT_PARTITION_TYPES, header_bc);
#else
for (i = 0; i < PARTITION_CONTEXTS; ++i)
- prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+ prob_diff_update(av1_partition_tree, fc->partition_prob[i],
counts->partition[i], PARTITION_TYPES, header_bc);
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
- prob_diff_update(vp10_intra_filter_tree, fc->intra_filter_probs[i],
+ prob_diff_update(av1_intra_filter_tree, fc->intra_filter_probs[i],
counts->intra_filter[i], INTRA_FILTERS, header_bc);
#endif // CONFIG_EXT_INTRA
if (frame_is_intra_only(cm)) {
- vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+ av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j)
- prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
+ prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
counts->kf_y_mode[i][j], INTRA_MODES, header_bc);
} else {
#if CONFIG_REF_MV
update_inter_mode_probs(cm, header_bc, counts);
#else
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
+ prob_diff_update(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, header_bc);
#endif
@@ -3317,32 +3310,32 @@
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
if (is_interintra_allowed_bsize_group(i)) {
- vp10_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
- cm->counts.interintra[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
+ cm->counts.interintra[i]);
}
}
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
prob_diff_update(
- vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
+ av1_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
counts->interintra_mode[i], INTERINTRA_MODES, header_bc);
}
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
- vp10_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
- cm->counts.wedge_interintra[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
+ cm->counts.wedge_interintra[i]);
}
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++)
if (is_interinter_wedge_used(i))
- vp10_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
- cm->counts.wedge_interinter[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
+ cm->counts.wedge_interinter[i]);
}
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
- prob_diff_update(vp10_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
+ prob_diff_update(av1_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
MOTION_VARIATIONS, header_bc);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -3350,22 +3343,22 @@
update_switchable_interp_probs(cm, header_bc, counts);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
- counts->intra_inter[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
+ counts->intra_inter[i]);
if (cpi->allow_comp_inter_inter) {
const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
- counts->comp_inter[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
+ counts->comp_inter[i]);
}
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < REF_CONTEXTS; i++) {
for (j = 0; j < (SINGLE_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
- counts->single_ref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
+ counts->single_ref[i][j]);
}
}
}
@@ -3374,31 +3367,31 @@
for (i = 0; i < REF_CONTEXTS; i++) {
#if CONFIG_EXT_REFS
for (j = 0; j < (FWD_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
- counts->comp_ref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+ counts->comp_ref[i][j]);
}
for (j = 0; j < (BWD_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
- counts->comp_bwdref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
+ counts->comp_bwdref[i][j]);
}
#else
for (j = 0; j < (COMP_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
- counts->comp_ref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+ counts->comp_ref[i][j]);
}
#endif // CONFIG_EXT_REFS
}
}
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
- prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
+ prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
counts->y_mode[i], INTRA_MODES, header_bc);
- vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
+  av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
#if CONFIG_REF_MV
- counts->mv);
+ counts->mv);
#else
- &counts->mv);
+ &counts->mv);
#endif
update_ext_tx_probs(cm, header_bc);
#if CONFIG_SUPERTX
@@ -3415,7 +3408,7 @@
assert(header_size <= 0xffff);
return header_size;
#else
- vpx_stop_encode(header_bc);
+ aom_stop_encode(header_bc);
assert(header_bc->pos <= 0xffff);
return header_bc->pos;
#endif // CONFIG_ANS
@@ -3451,7 +3444,7 @@
}
}
-static int remux_tiles(const VP10_COMMON *const cm, uint8_t *dst,
+static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
const uint32_t data_size, const uint32_t max_tile_size,
const uint32_t max_tile_col_size,
int *const tile_size_bytes,
@@ -3549,19 +3542,19 @@
}
}
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size) {
uint8_t *data = dst;
uint32_t compressed_header_size;
uint32_t uncompressed_header_size;
uint32_t data_size;
- struct vpx_write_bit_buffer wb = { data, 0 };
- struct vpx_write_bit_buffer saved_wb;
+ struct aom_write_bit_buffer wb = { data, 0 };
+ struct aom_write_bit_buffer saved_wb;
unsigned int max_tile_size;
unsigned int max_tile_col_size;
int tile_size_bytes;
int tile_col_size_bytes;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int have_tiles = cm->tile_cols * cm->tile_rows > 1;
#if CONFIG_BITSTREAM_DEBUG
@@ -3573,7 +3566,7 @@
#if CONFIG_EXT_REFS
if (cm->show_existing_frame) {
- *size = vpx_wb_bytes_written(&wb);
+ *size = aom_wb_bytes_written(&wb);
return;
}
#endif // CONFIG_EXT_REFS
@@ -3586,18 +3579,18 @@
// describing tile configuration.
#if CONFIG_EXT_TILE
// Number of bytes in tile column size - 1
- vpx_wb_write_literal(&wb, 0, 2);
+ aom_wb_write_literal(&wb, 0, 2);
#endif // CONFIG_EXT_TILE
// Number of bytes in tile size - 1
- vpx_wb_write_literal(&wb, 0, 2);
+ aom_wb_write_literal(&wb, 0, 2);
}
// Size of compressed header
- vpx_wb_write_literal(&wb, 0, 16);
+ aom_wb_write_literal(&wb, 0, 16);
- uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
+ uncompressed_header_size = (uint32_t)aom_wb_bytes_written(&wb);
data += uncompressed_header_size;
- vpx_clear_system_state();
+ aom_clear_system_state();
// Write the compressed header
compressed_header_size = write_compressed_header(cpi, data);
@@ -3618,14 +3611,14 @@
if (have_tiles) {
#if CONFIG_EXT_TILE
assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4);
- vpx_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
+ aom_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
#endif // CONFIG_EXT_TILE
assert(tile_size_bytes >= 1 && tile_size_bytes <= 4);
- vpx_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
+ aom_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
}
// TODO(jbb): Figure out what to do if compressed_header_size > 16 bits.
assert(compressed_header_size <= 0xffff);
- vpx_wb_write_literal(&saved_wb, compressed_header_size, 16);
+ aom_wb_write_literal(&saved_wb, compressed_header_size, 16);
*size = data - dst;
}
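
A note on av1_pack_bitstream above: the uncompressed header is written with zero placeholders for the tile-size-bytes fields and the 16-bit compressed-header size, a saved copy of the bit buffer (saved_wb) keeps the position of those fields, and the real values are patched in once the compressed header and tiles have been emitted. A minimal standalone sketch of that reserve-then-patch pattern follows; the byte_buffer type and write_u16 helper are simplified stand-ins invented for this note, not the aom_write_bit_buffer API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct byte_buffer {
  uint8_t *data;
  size_t pos;
};

static void write_u16(struct byte_buffer *b, uint16_t v) {
  b->data[b->pos++] = (uint8_t)(v >> 8);
  b->data[b->pos++] = (uint8_t)(v & 0xff);
}

int main(void) {
  uint8_t dst[256] = { 0 };
  struct byte_buffer wb = { dst, 0 };
  struct byte_buffer saved_wb;
  uint16_t compressed_header_size;

  saved_wb = wb;     /* remember where the size field lives */
  write_u16(&wb, 0); /* placeholder, value not yet known */

  /* ... emit the compressed header; pretend it took 123 bytes ... */
  compressed_header_size = 123;
  wb.pos += compressed_header_size;

  /* Patch the real value into the reserved slot. */
  assert(compressed_header_size <= 0xffff);
  write_u16(&saved_wb, compressed_header_size);

  printf("size field now reads %d\n", (dst[0] << 8) | dst[1]);
  return 0;
}
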
diff --git a/av1/encoder/bitstream.h b/av1/encoder/bitstream.h
index 01d2c8d..5a4fb19 100644
--- a/av1/encoder/bitstream.h
+++ b/av1/encoder/bitstream.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_BITSTREAM_H_
-#define VP10_ENCODER_BITSTREAM_H_
+#ifndef AV1_ENCODER_BITSTREAM_H_
+#define AV1_ENCODER_BITSTREAM_H_
#ifdef __cplusplus
extern "C" {
@@ -17,11 +17,11 @@
#include "av1/encoder/encoder.h"
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size);
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dest, size_t *size);
-void vp10_encode_token_init(void);
+void av1_encode_token_init(void);
-static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
+static INLINE int av1_preserve_existing_gf(AV1_COMP *cpi) {
#if CONFIG_EXT_REFS
// Do not swap gf and arf indices for internal overlay frames
return !cpi->multi_arf_allowed && cpi->rc.is_src_frame_alt_ref &&
@@ -36,4 +36,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_BITSTREAM_H_
+#endif // AV1_ENCODER_BITSTREAM_H_
diff --git a/av1/encoder/bitwriter.h b/av1/encoder/bitwriter.h
index 8cc674b..2deffeb 100644
--- a/av1/encoder/bitwriter.h
+++ b/av1/encoder/bitwriter.h
@@ -11,25 +11,25 @@
/* The purpose of this header is to provide compile time pluggable bit writer
* implementations with a common interface. */
-#ifndef VPX10_ENCODER_BITWRITER_H_
-#define VPX10_ENCODER_BITWRITER_H_
+#ifndef AOM10_ENCODER_BITWRITER_H_
+#define AOM10_ENCODER_BITWRITER_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/prob.h"
#if CONFIG_ANS
typedef struct BufAnsCoder BufAnsCoder;
#include "av1/encoder/buf_ans.h"
-#define vp10_writer BufAnsCoder
-#define vp10_write buf_uabs_write
-#define vp10_write_bit buf_uabs_write_bit
-#define vp10_write_literal buf_uabs_write_literal
+#define aom_writer BufAnsCoder
+#define aom_write buf_uabs_write
+#define aom_write_bit buf_uabs_write_bit
+#define aom_write_literal buf_uabs_write_literal
#else
#include "aom_dsp/bitwriter.h"
-#define vp10_writer vpx_writer
-#define vp10_write vpx_write
-#define vp10_write_bit vpx_write_bit
-#define vp10_write_literal vpx_write_literal
+#define aom_writer aom_writer
+#define aom_write aom_write
+#define aom_write_bit aom_write_bit
+#define aom_write_literal aom_write_literal
#endif
-#endif // VPX10_ENCODER_BITWRITER_H_
+#endif // AOM10_ENCODER_BITWRITER_H_
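
The header above plugs a writer implementation in at compile time: call sites are written once against the aom_writer/aom_write* names, and the #define layer binds them to either the buffered ANS coder or the aom_dsp boolean coder. A minimal sketch of the same idiom, with invented names (USE_ANS, my_writer, my_write_bit) standing in for CONFIG_ANS and the real types:

#include <stdio.h>

#ifdef USE_ANS /* invented stand-in for CONFIG_ANS */
typedef struct { int state; } ans_coder;
#define my_writer ans_coder
#define my_write_bit(w, b) ((void)(w), printf("ans bit %d\n", (b)))
#else
typedef struct { int state; } bool_coder;
#define my_writer bool_coder
#define my_write_bit(w, b) ((void)(w), printf("bool bit %d\n", (b)))
#endif

/* Call sites compile unchanged under either configuration. */
static void write_flag(my_writer *w, int flag) { my_write_bit(w, flag != 0); }

int main(void) {
  my_writer w = { 0 };
  write_flag(&w, 1);
  return 0;
}
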
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index 65bb1e2..5daa436 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_BLOCK_H_
-#define VP10_ENCODER_BLOCK_H_
+#ifndef AV1_ENCODER_BLOCK_H_
+#define AV1_ENCODER_BLOCK_H_
#include "av1/common/entropymv.h"
#include "av1/common/entropy.h"
@@ -50,8 +50,8 @@
/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
* coefficient in this block was zero) or not. */
-typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
- [COEFF_CONTEXTS][ENTROPY_TOKENS];
+typedef unsigned int av1_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
+ [COEFF_CONTEXTS][ENTROPY_TOKENS];
typedef struct {
int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
@@ -150,7 +150,7 @@
int encode_breakout;
// note that token_costs is the cost when eob node is skipped
- vp10_coeff_cost token_costs[TX_SIZES];
+ av1_coeff_cost token_costs[TX_SIZES];
int optimize;
@@ -179,4 +179,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_BLOCK_H_
+#endif // AV1_ENCODER_BLOCK_H_
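
For reference, this is how a lookup through the av1_coeff_cost table above is shaped. The array bounds below are illustrative stand-ins for the enums in av1/common/entropy.h, and the index-0/index-1 reading of the [2] dimension follows the comment in the hunk:

#include <stdio.h>

/* Illustrative stand-ins for the enums in av1/common/entropy.h. */
#define PLANE_TYPES 2
#define REF_TYPES 2
#define COEF_BANDS 6
#define COEFF_CONTEXTS 6
#define ENTROPY_TOKENS 12

typedef unsigned int coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
                               [COEFF_CONTEXTS][ENTROPY_TOKENS];

int main(void) {
  static coeff_cost costs; /* zero-filled here; real values are computed */
  const int plane = 0, ref = 1, band = 2, ctx = 3, token = 4;
  /* [.][.][.][0][.][.]: previous coefficient was nonzero, EOB node coded;
   * [.][.][.][1][.][.]: previous coefficient was zero, EOB node skipped. */
  printf("%u %u\n", costs[plane][ref][band][0][ctx][token],
         costs[plane][ref][band][1][ctx][token]);
  return 0;
}
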
diff --git a/av1/encoder/blockiness.c b/av1/encoder/blockiness.c
index 97e201a..487ffe3 100644
--- a/av1/encoder/blockiness.c
+++ b/av1/encoder/blockiness.c
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/common.h"
#include "av1/common/filter.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
@@ -119,12 +119,12 @@
// This function currently returns the blockiness for the entire frame by
// looking at all borders in steps of 4.
-double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
- const unsigned char *img2, int img2_pitch, int width,
- int height) {
+double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
+ const unsigned char *img2, int img2_pitch, int width,
+ int height) {
double blockiness = 0;
int i, j;
- vpx_clear_system_state();
+ aom_clear_system_state();
for (i = 0; i < height;
i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
for (j = 0; j < width; j += 4) {
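
The loop structure of av1_get_blockiness, as described by the comment above, visits the frame in 4x4 steps and accumulates a per-border measure. A self-contained sketch of that scan follows; border_measure and the missing normalization are placeholders, not the filter-aware computation in blockiness.c:

#include <stdio.h>

/* Placeholder for the filter-aware border measure in blockiness.c. */
static double border_measure(const unsigned char *img1,
                             const unsigned char *img2) {
  (void)img1;
  (void)img2;
  return 1.0;
}

static double frame_blockiness(const unsigned char *img1, int img1_pitch,
                               const unsigned char *img2, int img2_pitch,
                               int width, int height) {
  double blockiness = 0;
  int i, j;
  for (i = 0; i < height;
       i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
    for (j = 0; j < width; j += 4) {
      blockiness += border_measure(img1 + j, img2 + j);
    }
  }
  return blockiness; /* the real function also normalizes the total */
}

int main(void) {
  static unsigned char a[16 * 16], b[16 * 16];
  printf("%f\n", frame_blockiness(a, 16, b, 16, 16, 16));
  return 0;
}
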
diff --git a/av1/encoder/buf_ans.c b/av1/encoder/buf_ans.c
index f87c1e1..d20edc3 100644
--- a/av1/encoder/buf_ans.c
+++ b/av1/encoder/buf_ans.c
@@ -13,29 +13,29 @@
#include "av1/common/common.h"
#include "av1/encoder/buf_ans.h"
#include "av1/encoder/encoder.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
-void vp10_buf_ans_alloc(struct BufAnsCoder *c, struct VP10Common *cm,
- int size_hint) {
+void av1_buf_ans_alloc(struct BufAnsCoder *c, struct AV1Common *cm,
+ int size_hint) {
c->cm = cm;
c->size = size_hint;
- CHECK_MEM_ERROR(cm, c->buf, vpx_malloc(c->size * sizeof(*c->buf)));
+ CHECK_MEM_ERROR(cm, c->buf, aom_malloc(c->size * sizeof(*c->buf)));
// Initialize to overfull to trigger the assert in write.
c->offset = c->size + 1;
}
-void vp10_buf_ans_free(struct BufAnsCoder *c) {
- vpx_free(c->buf);
+void av1_buf_ans_free(struct BufAnsCoder *c) {
+ aom_free(c->buf);
c->buf = NULL;
c->size = 0;
}
-void vp10_buf_ans_grow(struct BufAnsCoder *c) {
+void av1_buf_ans_grow(struct BufAnsCoder *c) {
struct buffered_ans_symbol *new_buf = NULL;
int new_size = c->size * 2;
- CHECK_MEM_ERROR(c->cm, new_buf, vpx_malloc(new_size * sizeof(*new_buf)));
+ CHECK_MEM_ERROR(c->cm, new_buf, aom_malloc(new_size * sizeof(*new_buf)));
memcpy(new_buf, c->buf, c->size * sizeof(*c->buf));
- vpx_free(c->buf);
+ aom_free(c->buf);
c->buf = new_buf;
c->size = new_size;
}
diff --git a/av1/encoder/buf_ans.h b/av1/encoder/buf_ans.h
index 8a88c32..1ba6e6c 100644
--- a/av1/encoder/buf_ans.h
+++ b/av1/encoder/buf_ans.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_BUF_ANS_H_
-#define VP10_ENCODER_BUF_ANS_H_
+#ifndef AV1_ENCODER_BUF_ANS_H_
+#define AV1_ENCODER_BUF_ANS_H_
// Buffered forward ANS writer.
// Symbols are written to the writer in forward (decode) order and serialized
// backwards due to ANS's stack-like behavior.
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "av1/common/ans.h"
#ifdef __cplusplus
@@ -34,18 +34,18 @@
};
struct BufAnsCoder {
- struct VP10Common *cm;
+ struct AV1Common *cm;
struct buffered_ans_symbol *buf;
int size;
int offset;
};
-void vp10_buf_ans_alloc(struct BufAnsCoder *c, struct VP10Common *cm,
- int size_hint);
+void av1_buf_ans_alloc(struct BufAnsCoder *c, struct AV1Common *cm,
+ int size_hint);
-void vp10_buf_ans_free(struct BufAnsCoder *c);
+void av1_buf_ans_free(struct BufAnsCoder *c);
-void vp10_buf_ans_grow(struct BufAnsCoder *c);
+void av1_buf_ans_grow(struct BufAnsCoder *c);
static INLINE void buf_ans_write_reset(struct BufAnsCoder *const c) {
c->offset = 0;
@@ -55,7 +55,7 @@
AnsP8 prob) {
assert(c->offset <= c->size);
if (c->offset == c->size) {
- vp10_buf_ans_grow(c);
+ av1_buf_ans_grow(c);
}
c->buf[c->offset].method = ANS_METHOD_UABS;
c->buf[c->offset].val_start = val;
@@ -67,7 +67,7 @@
const struct rans_sym *const sym) {
assert(c->offset <= c->size);
if (c->offset == c->size) {
- vp10_buf_ans_grow(c);
+ av1_buf_ans_grow(c);
}
c->buf[c->offset].method = ANS_METHOD_RANS;
c->buf[c->offset].val_start = sym->cum_prob;
@@ -106,4 +106,4 @@
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
-#endif // VP10_ENCODER_BUF_ANS_H_
+#endif // AV1_ENCODER_BUF_ANS_H_
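
The buffered-ANS flow that buf_ans.{c,h} above implement: symbols are queued in forward (decode) order, then handed to the entropy coder in reverse, and the buffer doubles when full, as in av1_buf_ans_grow. A minimal sketch with a print-only "coder"; all names here are invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

struct sym { int val; };

struct buf_coder {
  struct sym *buf;
  int size, offset;
};

static void buf_grow(struct buf_coder *c) {
  c->size *= 2; /* same doubling policy as av1_buf_ans_grow */
  c->buf = realloc(c->buf, c->size * sizeof(*c->buf)); /* unchecked: demo */
}

static void buf_push(struct buf_coder *c, int val) {
  if (c->offset == c->size) buf_grow(c);
  c->buf[c->offset++].val = val;
}

static void buf_flush(struct buf_coder *c) {
  int i;
  /* Serialize backwards: the last queued symbol is coded first. */
  for (i = c->offset - 1; i >= 0; --i) printf("code %d\n", c->buf[i].val);
  c->offset = 0;
}

int main(void) {
  struct buf_coder c;
  c.size = 2;
  c.offset = 0;
  c.buf = malloc(c.size * sizeof(*c.buf));
  buf_push(&c, 1);
  buf_push(&c, 2);
  buf_push(&c, 3); /* triggers a grow */
  buf_flush(&c);   /* prints 3, 2, 1 */
  free(c.buf);
  return 0;
}
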
diff --git a/av1/encoder/context_tree.c b/av1/encoder/context_tree.c
index 9346e1c..2a105fc 100644
--- a/av1/encoder/context_tree.c
+++ b/av1/encoder/context_tree.c
@@ -18,7 +18,7 @@
#endif // CONFIG_EXT_PARTITION
};
-static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
+static void alloc_mode_context(AV1_COMMON *cm, int num_4x4_blk,
#if CONFIG_EXT_PARTITION_TYPES
PARTITION_TYPE partition,
#endif
@@ -33,17 +33,17 @@
for (i = 0; i < MAX_MB_PLANE; ++i) {
#if CONFIG_VAR_TX
- CHECK_MEM_ERROR(cm, ctx->blk_skip[i], vpx_calloc(num_blk, sizeof(uint8_t)));
+ CHECK_MEM_ERROR(cm, ctx->blk_skip[i], aom_calloc(num_blk, sizeof(uint8_t)));
#endif
for (k = 0; k < 3; ++k) {
CHECK_MEM_ERROR(cm, ctx->coeff[i][k],
- vpx_memalign(32, num_pix * sizeof(*ctx->coeff[i][k])));
+ aom_memalign(32, num_pix * sizeof(*ctx->coeff[i][k])));
CHECK_MEM_ERROR(cm, ctx->qcoeff[i][k],
- vpx_memalign(32, num_pix * sizeof(*ctx->qcoeff[i][k])));
+ aom_memalign(32, num_pix * sizeof(*ctx->qcoeff[i][k])));
CHECK_MEM_ERROR(cm, ctx->dqcoeff[i][k],
- vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
+ aom_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
CHECK_MEM_ERROR(cm, ctx->eobs[i][k],
- vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
+ aom_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
}
}
@@ -51,7 +51,7 @@
for (i = 0; i < 2; ++i) {
CHECK_MEM_ERROR(
cm, ctx->color_index_map[i],
- vpx_memalign(32, num_pix * sizeof(*ctx->color_index_map[i])));
+ aom_memalign(32, num_pix * sizeof(*ctx->color_index_map[i])));
}
}
}
@@ -60,28 +60,28 @@
int i, k;
for (i = 0; i < MAX_MB_PLANE; ++i) {
#if CONFIG_VAR_TX
- vpx_free(ctx->blk_skip[i]);
+ aom_free(ctx->blk_skip[i]);
ctx->blk_skip[i] = 0;
#endif
for (k = 0; k < 3; ++k) {
- vpx_free(ctx->coeff[i][k]);
+ aom_free(ctx->coeff[i][k]);
ctx->coeff[i][k] = 0;
- vpx_free(ctx->qcoeff[i][k]);
+ aom_free(ctx->qcoeff[i][k]);
ctx->qcoeff[i][k] = 0;
- vpx_free(ctx->dqcoeff[i][k]);
+ aom_free(ctx->dqcoeff[i][k]);
ctx->dqcoeff[i][k] = 0;
- vpx_free(ctx->eobs[i][k]);
+ aom_free(ctx->eobs[i][k]);
ctx->eobs[i][k] = 0;
}
}
for (i = 0; i < 2; ++i) {
- vpx_free(ctx->color_index_map[i]);
+ aom_free(ctx->color_index_map[i]);
ctx->color_index_map[i] = 0;
}
}
-static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
+static void alloc_tree_contexts(AV1_COMMON *cm, PC_TREE *tree,
int num_4x4_blk) {
#if CONFIG_EXT_PARTITION_TYPES
alloc_mode_context(cm, num_4x4_blk, PARTITION_NONE, &tree->none);
@@ -180,7 +180,7 @@
// partition level. There are contexts for none, horizontal, vertical, and
// split, along with a block_size value and a selected block_size that
// together represent the state of our search.
-void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
+void av1_setup_pc_tree(AV1_COMMON *cm, ThreadData *td) {
int i, j;
#if CONFIG_EXT_PARTITION
const int leaf_nodes = 256;
@@ -195,12 +195,12 @@
int square_index = 1;
int nodes;
- vpx_free(td->leaf_tree);
+ aom_free(td->leaf_tree);
CHECK_MEM_ERROR(cm, td->leaf_tree,
- vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
- vpx_free(td->pc_tree);
+ aom_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
+ aom_free(td->pc_tree);
CHECK_MEM_ERROR(cm, td->pc_tree,
- vpx_calloc(tree_nodes, sizeof(*td->pc_tree)));
+ aom_calloc(tree_nodes, sizeof(*td->pc_tree)));
this_pc = &td->pc_tree[0];
this_leaf = &td->leaf_tree[0];
@@ -248,7 +248,7 @@
}
}
-void vp10_free_pc_tree(ThreadData *td) {
+void av1_free_pc_tree(ThreadData *td) {
#if CONFIG_EXT_PARTITION
const int leaf_nodes = 256;
const int tree_nodes = 256 + 64 + 16 + 4 + 1;
@@ -264,8 +264,8 @@
// Sets up all the leaf nodes in the tree.
for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]);
- vpx_free(td->pc_tree);
+ aom_free(td->pc_tree);
td->pc_tree = NULL;
- vpx_free(td->leaf_tree);
+ aom_free(td->leaf_tree);
td->leaf_tree = NULL;
}
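
Where the node counts in av1_setup_pc_tree/av1_free_pc_tree come from: one PC_TREE node exists for every square block size from the superblock down, so the totals are sums of powers of four; with CONFIG_EXT_PARTITION that is 256 + 64 + 16 + 4 + 1 = 341. A small check of that arithmetic (the levels parameter is an invented convenience):

#include <stdio.h>

/* levels = number of square sizes from the superblock down:
 * 64x64..8x8 is 4 levels, 128x128..8x8 is 5. */
static int pc_tree_nodes(int levels) {
  int nodes = 0, per_level = 1, i;
  for (i = 0; i < levels; ++i) {
    nodes += per_level; /* 1 superblock, then 4, 16, ... children */
    per_level *= 4;
  }
  return nodes;
}

int main(void) {
  printf("64x64 superblocks: %d nodes\n", pc_tree_nodes(4));   /* 85 */
  printf("128x128 superblocks: %d nodes\n", pc_tree_nodes(5)); /* 341 */
  return 0;
}
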
diff --git a/av1/encoder/context_tree.h b/av1/encoder/context_tree.h
index 18f00bb..e121543 100644
--- a/av1/encoder/context_tree.h
+++ b/av1/encoder/context_tree.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_CONTEXT_TREE_H_
-#define VP10_ENCODER_CONTEXT_TREE_H_
+#ifndef AV1_ENCODER_CONTEXT_TREE_H_
+#define AV1_ENCODER_CONTEXT_TREE_H_
#include "av1/common/blockd.h"
#include "av1/encoder/block.h"
@@ -18,8 +18,8 @@
extern "C" {
#endif
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
struct ThreadData;
// Structure to hold snapshot of coding context during the mode picking process
@@ -93,11 +93,11 @@
#endif
} PC_TREE;
-void vp10_setup_pc_tree(struct VP10Common *cm, struct ThreadData *td);
-void vp10_free_pc_tree(struct ThreadData *td);
+void av1_setup_pc_tree(struct AV1Common *cm, struct ThreadData *td);
+void av1_free_pc_tree(struct ThreadData *td);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif /* VP10_ENCODER_CONTEXT_TREE_H_ */
+#endif /* AV1_ENCODER_CONTEXT_TREE_H_ */
diff --git a/av1/encoder/corner_detect.c b/av1/encoder/corner_detect.c
index 2b2d82d..a0500e3 100644
--- a/av1/encoder/corner_detect.c
+++ b/av1/encoder/corner_detect.c
@@ -14,7 +14,7 @@
#include <math.h>
#include <assert.h>
-#include "vp10/encoder/corner_detect.h"
+#include "av1/encoder/corner_detect.h"
#include "third_party/fastfeat/fast.h"
// Fast_9 wrapper
diff --git a/av1/encoder/corner_detect.h b/av1/encoder/corner_detect.h
index 8db713e..f658a6b 100644
--- a/av1/encoder/corner_detect.h
+++ b/av1/encoder/corner_detect.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_CORNER_DETECT_H_
-#define VP10_ENCODER_CORNER_DETECT_H_
+#ifndef AV1_ENCODER_CORNER_DETECT_H_
+#define AV1_ENCODER_CORNER_DETECT_H_
#include <stdio.h>
#include <stdlib.h>
@@ -18,4 +18,4 @@
int FastCornerDetect(unsigned char *buf, int width, int height, int stride,
int *points, int max_points);
-#endif // VP10_ENCODER_CORNER_DETECT_H
+#endif  // AV1_ENCODER_CORNER_DETECT_H_
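
A hypothetical call site for the FastCornerDetect wrapper declared above. The interleaved x,y layout of points and the return value being a corner count are inferred from typical usage, not stated in this diff; the stub definition exists only so the sketch links on its own, where a real build would link corner_detect.c instead:

#include <stdio.h>

int FastCornerDetect(unsigned char *buf, int width, int height, int stride,
                     int *points, int max_points);

#define MAX_POINTS 512

static void find_corners(unsigned char *frame, int w, int h, int stride) {
  int points[2 * MAX_POINTS]; /* assumption: interleaved x, y pairs */
  int i;
  const int n = FastCornerDetect(frame, w, h, stride, points, MAX_POINTS);
  for (i = 0; i < n; ++i)
    printf("corner %d at (%d, %d)\n", i, points[2 * i], points[2 * i + 1]);
}

/* Stub so the sketch links standalone; a real build links corner_detect.c. */
int FastCornerDetect(unsigned char *buf, int width, int height, int stride,
                     int *points, int max_points) {
  (void)buf; (void)width; (void)height; (void)stride; (void)points;
  (void)max_points;
  return 0;
}

int main(void) {
  static unsigned char frame[64 * 64];
  find_corners(frame, 64, 64, 64);
  return 0;
}
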
diff --git a/av1/encoder/corner_match.c b/av1/encoder/corner_match.c
index 6b19d5b..02e8212 100644
--- a/av1/encoder/corner_match.c
+++ b/av1/encoder/corner_match.c
@@ -13,7 +13,7 @@
#include <memory.h>
#include <math.h>
-#include "vp10/encoder/corner_match.h"
+#include "av1/encoder/corner_match.h"
#define MATCH_SZ 15
#define MATCH_SZ_BY2 ((MATCH_SZ - 1) / 2)
diff --git a/av1/encoder/corner_match.h b/av1/encoder/corner_match.h
index 3bc8cb9..01c0ea4 100644
--- a/av1/encoder/corner_match.h
+++ b/av1/encoder/corner_match.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_CORNER_MATCH_H_
-#define VP10_ENCODER_CORNER_MATCH_H_
+#ifndef AV1_ENCODER_CORNER_MATCH_H_
+#define AV1_ENCODER_CORNER_MATCH_H_
#include <stdio.h>
#include <stdlib.h>
@@ -26,4 +26,4 @@
int height, int frm_stride, int ref_stride,
double *correspondence_pts);
-#endif // VP10_ENCODER_CORNER_MATCH_H
+#endif  // AV1_ENCODER_CORNER_MATCH_H_
diff --git a/av1/encoder/cost.c b/av1/encoder/cost.c
index 4542638..8a87f8f 100644
--- a/av1/encoder/cost.c
+++ b/av1/encoder/cost.c
@@ -15,9 +15,9 @@
#endif // CONFIG_ANS
#include "av1/common/entropy.h"
-/* round(-log2(i/256.) * (1 << VP10_PROB_COST_SHIFT))
+/* round(-log2(i/256.) * (1 << AV1_PROB_COST_SHIFT))
Begins with a bogus entry for simpler addressing. */
-const uint16_t vp10_prob_cost[256] = {
+const uint16_t av1_prob_cost[256] = {
4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718,
1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409,
@@ -41,8 +41,8 @@
};
#if CONFIG_ANS
-// round(-log2(i/1024.) * (1 << VP10_PROB_COST_SHIFT))
-static const uint16_t vp10_prob_cost10[1024] = {
+// round(-log2(i/1024.) * (1 << AV1_PROB_COST_SHIFT))
+static const uint16_t av1_prob_cost10[1024] = {
5120, 5120, 4608, 4308, 4096, 3931, 3796, 3683, 3584, 3497, 3419, 3349, 3284,
3225, 3171, 3120, 3072, 3027, 2985, 2945, 2907, 2871, 2837, 2804, 2772, 2742,
2713, 2685, 2659, 2633, 2608, 2583, 2560, 2537, 2515, 2494, 2473, 2453, 2433,
@@ -125,15 +125,15 @@
};
#endif // CONFIG_ANS
-static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i,
+static void cost(int *costs, aom_tree tree, const aom_prob *probs, int i,
int c) {
- const vpx_prob prob = probs[i / 2];
+ const aom_prob prob = probs[i / 2];
int b;
assert(prob != 0);
for (b = 0; b <= 1; ++b) {
- const int cc = c + vp10_cost_bit(prob, b);
- const vpx_tree_index ii = tree[i + b];
+ const int cc = c + av1_cost_bit(prob, b);
+ const aom_tree_index ii = tree[i + b];
if (ii <= 0)
costs[-ii] = cc;
@@ -143,26 +143,26 @@
}
#if CONFIG_ANS
-void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
- const rans_dec_lut token_cdf, int skip_eob) {
+void av1_cost_tokens_ans(int *costs, const aom_prob *tree_probs,
+ const rans_dec_lut token_cdf, int skip_eob) {
int c_tree = 0; // Cost of the "tree" nodes EOB and ZERO.
int i;
- costs[EOB_TOKEN] = vp10_cost_bit(tree_probs[0], 0);
- if (!skip_eob) c_tree = vp10_cost_bit(tree_probs[0], 1);
+ costs[EOB_TOKEN] = av1_cost_bit(tree_probs[0], 0);
+ if (!skip_eob) c_tree = av1_cost_bit(tree_probs[0], 1);
for (i = ZERO_TOKEN; i <= CATEGORY6_TOKEN; ++i) {
const int p = token_cdf[i + 1] - token_cdf[i];
- costs[i] = c_tree + vp10_prob_cost10[p];
+ costs[i] = c_tree + av1_prob_cost10[p];
}
}
#endif // CONFIG_ANS
-void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree) {
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree) {
cost(costs, tree, probs, 0, 0);
}
-void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree) {
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree) {
assert(tree[0] <= 0 && tree[1] > 0);
- costs[-tree[0]] = vp10_cost_bit(probs[0], 0);
+ costs[-tree[0]] = av1_cost_bit(probs[0], 0);
cost(costs, tree, probs, 2, 0);
}
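
The av1_prob_cost table above can be regenerated from the formula in its comment, round(-log2(i/256.) * (1 << AV1_PROB_COST_SHIFT)), with entry 0 duplicating entry 1 as the "bogus" value mentioned. A short check (assumes a C99 compiler and linking with -lm):

#include <math.h>
#include <stdio.h>

#define PROB_COST_SHIFT 9 /* AV1_PROB_COST_SHIFT from cost.h below */

int main(void) {
  int i;
  printf("%d: %d (bogus, duplicates entry 1)\n", 0, 4096);
  for (i = 1; i < 8; ++i) {
    const int c = (int)lrint(-log2(i / 256.0) * (1 << PROB_COST_SHIFT));
    printf("%d: %d\n", i, c); /* 4096, 3584, 3284, 3072, ... as in the table */
  }
  return 0;
}
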
diff --git a/av1/encoder/cost.h b/av1/encoder/cost.h
index 5ae2a79..4e4d9bb 100644
--- a/av1/encoder/cost.h
+++ b/av1/encoder/cost.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_COST_H_
-#define VP10_ENCODER_COST_H_
+#ifndef AV1_ENCODER_COST_H_
+#define AV1_ENCODER_COST_H_
#include "aom_dsp/prob.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#if CONFIG_ANS
#include "av1/common/ans.h"
#endif // CONFIG_ANS
@@ -21,50 +21,50 @@
extern "C" {
#endif
-extern const uint16_t vp10_prob_cost[256];
+extern const uint16_t av1_prob_cost[256];
-// The factor to scale from cost in bits to cost in vp10_prob_cost units.
-#define VP10_PROB_COST_SHIFT 9
+// The factor to scale from cost in bits to cost in av1_prob_cost units.
+#define AV1_PROB_COST_SHIFT 9
-#define vp10_cost_zero(prob) (vp10_prob_cost[prob])
+#define av1_cost_zero(prob) (av1_prob_cost[prob])
-#define vp10_cost_one(prob) vp10_cost_zero(256 - (prob))
+#define av1_cost_one(prob) av1_cost_zero(256 - (prob))
-#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) : (prob))
+#define av1_cost_bit(prob, bit) av1_cost_zero((bit) ? 256 - (prob) : (prob))
// Cost of coding an n bit literal, using 128 (i.e. 50%) probability
// for each bit.
-#define vp10_cost_literal(n) ((n) * (1 << VP10_PROB_COST_SHIFT))
+#define av1_cost_literal(n) ((n) * (1 << AV1_PROB_COST_SHIFT))
static INLINE unsigned int cost_branch256(const unsigned int ct[2],
- vpx_prob p) {
- return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
+ aom_prob p) {
+ return ct[0] * av1_cost_zero(p) + ct[1] * av1_cost_one(p);
}
-static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits,
+static INLINE int treed_cost(aom_tree tree, const aom_prob *probs, int bits,
int len) {
int cost = 0;
- vpx_tree_index i = 0;
+ aom_tree_index i = 0;
do {
const int bit = (bits >> --len) & 1;
- cost += vp10_cost_bit(probs[i >> 1], bit);
+ cost += av1_cost_bit(probs[i >> 1], bit);
i = tree[i + bit];
} while (len);
return cost;
}
-void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree);
-void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree);
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree);
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree);
#if CONFIG_ANS
-void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
- const rans_dec_lut token_cdf, int skip_eob);
+void av1_cost_tokens_ans(int *costs, const aom_prob *tree_probs,
+ const rans_dec_lut token_cdf, int skip_eob);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_COST_H_
+#endif // AV1_ENCODER_COST_H_
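
Concretely, the macros above measure cost in units of 1/(1 << AV1_PROB_COST_SHIFT) = 1/512 of a bit, so a bit at probability 128 (50%) costs exactly 512 and av1_cost_literal(n) is n << 9. A small worked example; the 512 constants below restate av1_cost_zero(128)/av1_cost_one(128) rather than reading the real table:

#include <stdio.h>

#define PROB_COST_SHIFT 9
#define COST_LITERAL(n) ((n) * (1 << PROB_COST_SHIFT))

int main(void) {
  /* One bit = 512 units, so an 8-bit literal costs 8 * 512 = 4096. */
  printf("literal(8) = %d units = %d bits\n", COST_LITERAL(8),
         COST_LITERAL(8) >> PROB_COST_SHIFT);

  /* cost_branch256 weighs the two branch costs by observed counts. At
   * prob 128 both branches cost 512, so 30 zeros and 10 ones come to
   * (30 + 10) * 512 units = 40 bits. */
  {
    const unsigned int ct[2] = { 30, 10 };
    printf("branch cost = %u units\n", ct[0] * 512 + ct[1] * 512);
  }
  return 0;
}
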
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index 8f7812e..fc56436 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -11,9 +11,9 @@
#include <assert.h>
#include <math.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/blockd.h"
#include "av1/common/idct.h"
#include "aom_dsp/fwd_txfm.h"
@@ -1329,10 +1329,10 @@
};
#endif // CONFIG_EXT_TX
-void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
- vpx_fdct4x4_c(input, output, stride);
+ aom_fdct4x4_c(input, output, stride);
} else {
tran_low_t out[4 * 4];
int i, j;
@@ -1362,8 +1362,8 @@
}
#if CONFIG_EXT_TX
-void vp10_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
const int n = 4;
const int n2 = 8;
tran_low_t out[8 * 4];
@@ -1391,8 +1391,8 @@
// Note: overall scale factor of transform is 8 times unitary
}
-void vp10_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
const int n = 4;
const int n2 = 8;
tran_low_t out[8 * 4];
@@ -1420,8 +1420,8 @@
// Note: overall scale factor of transform is 8 times unitary
}
-void vp10_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
const int n = 8;
const int n2 = 16;
tran_low_t out[16 * 8];
@@ -1449,8 +1449,8 @@
// Note: overall scale factor of transform is 8 times unitary
}
-void vp10_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
const int n = 8;
const int n2 = 16;
tran_low_t out[16 * 8];
@@ -1478,8 +1478,8 @@
// Note: overall scale factor of transform is 8 times unitary
}
-void vp10_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
const int n = 16;
const int n2 = 32;
tran_low_t out[32 * 16];
@@ -1508,8 +1508,8 @@
// Note: overall scale factor of transform is 4 times unitary
}
-void vp10_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
const int n = 16;
const int n2 = 32;
tran_low_t out[32 * 16];
@@ -1540,19 +1540,19 @@
#endif // CONFIG_EXT_TX
-void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
- tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan
+void av1_fdct8x8_quant_c(const int16_t *input, int stride,
+ tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan
#if CONFIG_AOM_QM
- ,
- const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+ ,
+ const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
#endif
- ) {
+ ) {
int eob = -1;
int i, j;
@@ -1666,10 +1666,10 @@
*eob_ptr = eob + 1;
}
-void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
- vpx_fdct8x8_c(input, output, stride);
+ aom_fdct8x8_c(input, output, stride);
} else {
tran_low_t out[64];
int i, j;
@@ -1700,7 +1700,7 @@
/* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
pixel. */
-void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
int i;
tran_high_t a1, b1, c1, d1, e1;
const int16_t *ip_pass0 = input;
@@ -1754,10 +1754,10 @@
}
}
-void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
- vpx_fdct16x16_c(input, output, stride);
+ aom_fdct16x16_c(input, output, stride);
} else {
tran_low_t out[256];
int i, j;
@@ -1786,65 +1786,65 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
- vp10_fht4x4_c(input, output, stride, tx_type);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ av1_fht4x4_c(input, output, stride, tx_type);
}
#if CONFIG_EXT_TX
-void vp10_highbd_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ av1_fht4x8_c(input, output, stride, tx_type);
+}
+
+void av1_highbd_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ av1_fht8x4_c(input, output, stride, tx_type);
+}
+
+void av1_highbd_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
- vp10_fht4x8_c(input, output, stride, tx_type);
+ av1_fht8x16_c(input, output, stride, tx_type);
}
-void vp10_highbd_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
- vp10_fht8x4_c(input, output, stride, tx_type);
+ av1_fht16x8_c(input, output, stride, tx_type);
}
-void vp10_highbd_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
- vp10_fht8x16_c(input, output, stride, tx_type);
+ av1_fht16x32_c(input, output, stride, tx_type);
}
-void vp10_highbd_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
- vp10_fht16x8_c(input, output, stride, tx_type);
-}
-
-void vp10_highbd_fht16x32_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
- vp10_fht16x32_c(input, output, stride, tx_type);
-}
-
-void vp10_highbd_fht32x16_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
- vp10_fht32x16_c(input, output, stride, tx_type);
+ av1_fht32x16_c(input, output, stride, tx_type);
}
#endif // CONFIG_EXT_TX
-void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
- vp10_fht8x8_c(input, output, stride, tx_type);
+void av1_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ av1_fht8x8_c(input, output, stride, tx_type);
}
-void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
- int stride) {
- vp10_fwht4x4_c(input, output, stride);
+void av1_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
+ int stride) {
+ av1_fwht4x4_c(input, output, stride);
}
-void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
- vp10_fht16x16_c(input, output, stride, tx_type);
+void av1_highbd_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ av1_fht16x16_c(input, output, stride, tx_type);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EXT_TX
-void vp10_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
- vpx_fdct32x32_c(input, output, stride);
+ aom_fdct32x32_c(input, output, stride);
} else {
tran_low_t out[1024];
int i, j;
@@ -1874,8 +1874,8 @@
}
// Forward identity transform.
-void vp10_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
- int bs, int tx_type) {
+void av1_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
+ int bs, int tx_type) {
int r, c;
const int shift = bs < 32 ? 3 : 2;
if (tx_type == IDTX) {
@@ -1887,10 +1887,10 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fht32x32_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
- vp10_fht32x32_c(input, output, stride, tx_type);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
+ av1_fht32x32_c(input, output, stride, tx_type);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EXT_TX
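
On the "3.5 adds, 0.5 shifts per pixel" claim in the av1_fwht4x4_c comment above: each 1-D 4-point stage costs 7 add/subtract operations and 1 shift for 4 samples, and the 2-D transform runs one stage over rows and one over columns, giving (2 * 7) / 4 = 3.5 adds and (2 * 1) / 4 = 0.5 shifts per pixel. A sketch of one such reversible stage, reusing the variable names from the hunk; this is an illustration of the lifting structure, not a drop-in replacement:

#include <stdio.h>

static void fwht4_stage(const int in[4], int out[4]) {
  int a1 = in[0], b1 = in[1], c1 = in[2], d1 = in[3], e1;
  a1 += b1;            /* add 1 */
  d1 -= c1;            /* add 2 */
  e1 = (a1 - d1) >> 1; /* add 3, shift 1 */
  b1 = e1 - b1;        /* add 4 */
  c1 = e1 - c1;        /* add 5 */
  a1 -= c1;            /* add 6 */
  d1 += b1;            /* add 7 */
  out[0] = a1;
  out[1] = c1;
  out[2] = d1;
  out[3] = b1;
}

int main(void) {
  const int in[4] = { 1, 2, 3, 4 };
  int out[4];
  fwht4_stage(in, out);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
  return 0;
}
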
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index b2635b4..a7183f9 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -12,13 +12,13 @@
#include <math.h>
#include <stdio.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
#include "aom_ports/system_state.h"
#include "av1/common/common.h"
@@ -52,23 +52,22 @@
#include "av1/encoder/segmentation.h"
#include "av1/encoder/tokenize.h"
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define IF_HBD(...) __VA_ARGS__
#else
#define IF_HBD(...)
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
#if CONFIG_SUPERTX
static int check_intra_b(PICK_MODE_CONTEXT *ctx);
-static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- PC_TREE *pc_tree);
-static void predict_superblock(VP10_COMP *cpi, ThreadData *td,
+static int check_intra_sb(AV1_COMP *cpi, const TileInfo *const tile, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree);
+static void predict_superblock(AV1_COMP *cpi, ThreadData *td,
#if CONFIG_EXT_INTER
int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
@@ -76,17 +75,17 @@
BLOCK_SIZE bsize_pred, int b_sub8x8, int block);
static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
PC_TREE *pc_tree);
-static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
+static void predict_sb_complex(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, int mi_row_ori, int mi_col_ori,
int output_enabled, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
int dst_stride[3], PC_TREE *pc_tree);
-static void update_state_sb_supertx(VP10_COMP *cpi, ThreadData *td,
+static void update_state_sb_supertx(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int output_enabled, PC_TREE *pc_tree);
-static void rd_supertx_sb(VP10_COMP *cpi, ThreadData *td,
+static void rd_supertx_sb(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
TX_TYPE *best_tx, PC_TREE *pc_tree);
@@ -96,7 +95,7 @@
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
-static const uint8_t VP10_VAR_OFFS[MAX_SB_SIZE] = {
+static const uint8_t AV1_VAR_OFFS[MAX_SB_SIZE] = {
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
@@ -111,8 +110,8 @@
#endif // CONFIG_EXT_PARTITION
};
-#if CONFIG_VP9_HIGHBITDEPTH
-static const uint16_t VP10_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+static const uint16_t AV1_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
@@ -127,7 +126,7 @@
#endif // CONFIG_EXT_PARTITION
};
-static const uint16_t VP10_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
+static const uint16_t AV1_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
@@ -148,7 +147,7 @@
#endif // CONFIG_EXT_PARTITION
};
-static const uint16_t VP10_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
+static const uint16_t AV1_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
@@ -172,45 +171,45 @@
128 * 16
#endif // CONFIG_EXT_PARTITION
};
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs) {
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs) {
unsigned int sse;
const unsigned int var =
- cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP10_VAR_OFFS, 0, &sse);
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride, AV1_VAR_OFFS, 0, &sse);
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
-#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs, int bd) {
unsigned int var, sse;
switch (bd) {
case 10:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_10), 0,
- &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_10), 0, &sse);
break;
case 12:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_12), 0,
- &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_12), 0, &sse);
break;
case 8:
default:
var =
cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_8), 0, &sse);
+ CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_8), 0, &sse);
break;
}
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
+static unsigned int get_sby_perpixel_diff_variance(AV1_COMP *cpi,
const struct buf_2d *ref,
int mi_row, int mi_col,
BLOCK_SIZE bs) {
@@ -225,9 +224,8 @@
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
-static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
- MACROBLOCK *x, int mi_row,
- int mi_col) {
+static BLOCK_SIZE get_rd_var_based_fixed_partition(AV1_COMP *cpi, MACROBLOCK *x,
+ int mi_row, int mi_col) {
unsigned int var = get_sby_perpixel_diff_variance(
cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
if (var < 8)
@@ -242,21 +240,21 @@
// Lighter version of set_offsets that only sets the mode info
// pointers.
-static void set_mode_info_offsets(VP10_COMP *const cpi, MACROBLOCK *const x,
+static void set_mode_info_offsets(AV1_COMP *const cpi, MACROBLOCK *const x,
MACROBLOCKD *const xd, int mi_row,
int mi_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
xd->mi[0] = cm->mi + idx_str;
x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
-static void set_offsets_without_segment_id(VP10_COMP *cpi,
+static void set_offsets_without_segment_id(AV1_COMP *cpi,
const TileInfo *const tile,
MACROBLOCK *const x, int mi_row,
int mi_col, BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -273,14 +271,14 @@
#endif
// Set up destination pointers.
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
// Set up limit values for MV components.
// MVs beyond this range do not produce a new or different prediction block.
- x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VPX_INTERP_EXTEND);
- x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VPX_INTERP_EXTEND);
- x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VPX_INTERP_EXTEND;
- x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VPX_INTERP_EXTEND;
+ x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + AOM_INTERP_EXTEND);
+ x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + AOM_INTERP_EXTEND);
+ x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + AOM_INTERP_EXTEND;
+ x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + AOM_INTERP_EXTEND;
// Set up distance of MB to edge of frame in 1/8th pel units.
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
@@ -288,20 +286,20 @@
cm->mi_cols);
// Set up source buffers.
- vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+ av1_setup_src_planes(x, cpi->Source, mi_row, mi_col);
// R/D setup.
x->rddiv = cpi->rd.RDDIV;
x->rdmult = cpi->rd.RDMULT;
- // required by vp10_append_sub8x8_mvs_for_idx() and vp10_find_best_ref_mvs()
+ // required by av1_append_sub8x8_mvs_for_idx() and av1_find_best_ref_mvs()
xd->tile = *tile;
}
-static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_offsets(AV1_COMP *cpi, const TileInfo *const tile,
MACROBLOCK *const x, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const struct segmentation *const seg = &cm->seg;
@@ -317,7 +315,7 @@
seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
- vp10_init_plane_quantizers(cpi, x, mbmi->segment_id);
+ av1_init_plane_quantizers(cpi, x, mbmi->segment_id);
x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
} else {
@@ -331,11 +329,11 @@
}
#if CONFIG_SUPERTX
-static void set_offsets_supertx(VP10_COMP *cpi, ThreadData *td,
+static void set_offsets_supertx(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, BLOCK_SIZE bsize) {
MACROBLOCK *const x = &td->mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -348,7 +346,7 @@
cm->mi_cols);
}
-static void set_offsets_extend(VP10_COMP *cpi, ThreadData *td,
+static void set_offsets_extend(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row_pred,
int mi_col_pred, int mi_row_ori, int mi_col_ori,
BLOCK_SIZE bsize_pred) {
@@ -356,7 +354,7 @@
// (mi_row_ori, mi_col_ori, bsize_ori): region for mv
// (mi_row_pred, mi_col_pred, bsize_pred): region to predict
MACROBLOCK *const x = &td->mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_width = num_8x8_blocks_wide_lookup[bsize_pred];
const int mi_height = num_8x8_blocks_high_lookup[bsize_pred];
@@ -365,10 +363,10 @@
// Set up limit values for MV components.
// MVs beyond this range do not produce a new or different prediction block.
- x->mv_row_min = -(((mi_row_pred + mi_height) * MI_SIZE) + VPX_INTERP_EXTEND);
- x->mv_col_min = -(((mi_col_pred + mi_width) * MI_SIZE) + VPX_INTERP_EXTEND);
- x->mv_row_max = (cm->mi_rows - mi_row_pred) * MI_SIZE + VPX_INTERP_EXTEND;
- x->mv_col_max = (cm->mi_cols - mi_col_pred) * MI_SIZE + VPX_INTERP_EXTEND;
+ x->mv_row_min = -(((mi_row_pred + mi_height) * MI_SIZE) + AOM_INTERP_EXTEND);
+ x->mv_col_min = -(((mi_col_pred + mi_width) * MI_SIZE) + AOM_INTERP_EXTEND);
+ x->mv_row_max = (cm->mi_rows - mi_row_pred) * MI_SIZE + AOM_INTERP_EXTEND;
+ x->mv_col_max = (cm->mi_cols - mi_col_pred) * MI_SIZE + AOM_INTERP_EXTEND;
// Set up distance of MB to edge of frame in 1/8th pel units.
assert(!(mi_col_pred & (mi_width - 1)) && !(mi_row_pred & (mi_height - 1)));
@@ -382,15 +380,15 @@
x->rdmult = cpi->rd.RDMULT;
}
-static void set_segment_id_supertx(const VP10_COMP *const cpi,
+static void set_segment_id_supertx(const AV1_COMP *const cpi,
MACROBLOCK *const x, const int mi_row,
const int mi_col, const BLOCK_SIZE bsize) {
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
const struct segmentation *seg = &cm->seg;
const int miw =
- VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+ AOMMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
const int mih =
- VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
+ AOMMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
const int mi_offset = mi_row * cm->mi_stride + mi_col;
MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
int r, c;
@@ -404,11 +402,11 @@
for (r = 0; r < mih; r++)
for (c = 0; c < miw; c++)
seg_id_supertx =
- VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
+ AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
// Initialize plane quantisers
- vp10_init_plane_quantizers(cpi, x, seg_id_supertx);
+ av1_init_plane_quantizers(cpi, x, seg_id_supertx);
x->encode_breakout = cpi->segment_encode_breakout[seg_id_supertx];
}
@@ -419,7 +417,7 @@
}
#endif // CONFIG_SUPERTX
-static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+static void set_block_size(AV1_COMP *const cpi, MACROBLOCK *const x,
MACROBLOCKD *const xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
@@ -428,11 +426,11 @@
}
}
-static void set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+static void set_vt_partitioning(AV1_COMP *cpi, MACROBLOCK *const x,
MACROBLOCKD *const xd, VAR_TREE *vt, int mi_row,
int mi_col, const int64_t *const threshold,
const BLOCK_SIZE *const bsize_min) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int hbw = num_8x8_blocks_wide_lookup[vt->bsize] / 2;
const int hbh = num_8x8_blocks_high_lookup[vt->bsize] / 2;
const int has_cols = mi_col + hbw < cm->mi_cols;
@@ -522,8 +520,8 @@
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
-static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_vbp_thresholds(AV1_COMP *cpi, int64_t thresholds[], int q) {
+ AV1_COMMON *const cm = &cpi->common;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
const int threshold_multiplier = is_key_frame ? 20 : 1;
const int64_t threshold_base =
@@ -549,8 +547,8 @@
thresholds[0] = INT64_MIN;
}
-void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int q) {
+ AV1_COMMON *const cm = &cpi->common;
SPEED_FEATURES *const sf = &cpi->sf;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
if (sf->partition_search_type != VAR_BASED_PARTITION &&
@@ -578,7 +576,7 @@
// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int highbd,
#endif
int pixels_wide, int pixels_high) {
@@ -594,16 +592,16 @@
if (x8_idx < pixels_wide && y8_idx < pixels_high) {
const int src_offset = y8_idx * src_stride + x8_idx;
const int ref_offset = y8_idx * ref_stride + x8_idx;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (highbd) {
- vpx_highbd_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
+ aom_highbd_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
ref_stride, &min, &max);
} else {
- vpx_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
+ aom_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
ref_stride, &min, &max);
}
#else
- vpx_minmax_8x8(src + src_offset, src_stride, ref + ref_offset, ref_stride,
+ aom_minmax_8x8(src + src_offset, src_stride, ref + ref_offset, ref_stride,
&min, &max);
#endif
if ((max - min) > minmax_max) minmax_max = (max - min);
@@ -613,38 +611,38 @@
return (minmax_max - minmax_min);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE int avg_4x4(const uint8_t *const src, const int stride,
const int highbd) {
if (highbd) {
- return vpx_highbd_avg_4x4(src, stride);
+ return aom_highbd_avg_4x4(src, stride);
} else {
- return vpx_avg_4x4(src, stride);
+ return aom_avg_4x4(src, stride);
}
}
#else
static INLINE int avg_4x4(const uint8_t *const src, const int stride) {
- return vpx_avg_4x4(src, stride);
+ return aom_avg_4x4(src, stride);
}
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE int avg_8x8(const uint8_t *const src, const int stride,
const int highbd) {
if (highbd) {
- return vpx_highbd_avg_8x8(src, stride);
+ return aom_highbd_avg_8x8(src, stride);
} else {
- return vpx_avg_8x8(src, stride);
+ return aom_avg_8x8(src, stride);
}
}
#else
static INLINE int avg_8x8(const uint8_t *const src, const int stride) {
- return vpx_avg_8x8(src, stride);
+ return aom_avg_8x8(src, stride);
}
#endif
static void init_variance_tree(VAR_TREE *const vt,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int highbd,
#endif
BLOCK_SIZE bsize, BLOCK_SIZE leaf_size,
@@ -665,37 +663,37 @@
vt->width = width;
vt->height = height;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
vt->highbd = highbd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (bsize > leaf_size) {
const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
const int px = num_4x4_blocks_wide_lookup[subsize] * 4;
init_variance_tree(vt->split[0],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- subsize, leaf_size, VPXMIN(px, width),
- VPXMIN(px, height), src, src_stride, ref, ref_stride);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ subsize, leaf_size, AOMMIN(px, width),
+ AOMMIN(px, height), src, src_stride, ref, ref_stride);
init_variance_tree(vt->split[1],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- subsize, leaf_size, width - px, VPXMIN(px, height),
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ subsize, leaf_size, width - px, AOMMIN(px, height),
src + px, src_stride, ref + px, ref_stride);
init_variance_tree(vt->split[2],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- subsize, leaf_size, VPXMIN(px, width), height - px,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ subsize, leaf_size, AOMMIN(px, width), height - px,
src + px * src_stride, src_stride, ref + px * ref_stride,
ref_stride);
init_variance_tree(vt->split[3],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
subsize, leaf_size, width - px, height - px,
src + px * src_stride + px, src_stride,
ref + px * ref_stride + px, ref_stride);
@@ -771,7 +769,7 @@
return vt->force_split;
}
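// Determines whether a node of the variance tree must be split; for 16x16
// nodes this checks the block variance against thresholds[0] and, failing
// that, the minmax spread of its 8x8 sub-blocks.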
-static int check_split(VP10_COMP *const cpi, VAR_TREE *const vt,
+static int check_split(AV1_COMP *const cpi, VAR_TREE *const vt,
const int segment_id, const int64_t *const thresholds) {
if (vt->bsize == BLOCK_16X16) {
vt->force_split = vt->variances.none.variance > thresholds[0];
@@ -782,7 +780,7 @@
// force split to 8x8 block for this 16x16 block.
int minmax =
compute_minmax_8x8(vt->src, vt->src_stride, vt->ref, vt->ref_stride,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
vt->highbd,
#endif
vt->width, vt->height);
@@ -809,10 +807,10 @@
// This function chooses partitioning based on the variance between source and
// reconstructed last (or golden), where variance is computed for down-sampled
// inputs.
-static void choose_partitioning(VP10_COMP *const cpi, ThreadData *const td,
+static void choose_partitioning(AV1_COMP *const cpi, ThreadData *const td,
const TileInfo *const tile, MACROBLOCK *const x,
const int mi_row, const int mi_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
VAR_TREE *const vt = td->var_root[cm->mib_size_log2 - MIN_MIB_SIZE_LOG2];
int i;
@@ -843,7 +841,7 @@
segment_id = get_segment_id(cm, map, cm->sb_size, mi_row, mi_col);
if (cyclic_refresh_segment_id_boosted(segment_id)) {
- int q = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ int q = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
set_vbp_thresholds(cpi, thresholds, q);
}
}
@@ -880,8 +878,8 @@
assert(yv12 != NULL);
if (yv12_g && yv12_g != yv12) {
- vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
- &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+ av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+ &cm->frame_refs[GOLDEN_FRAME - 1].sf);
y_sad_g = cpi->fn_ptr[bsize].sdf(
x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
xd->plane[0].pre[0].stride);
@@ -889,8 +887,8 @@
y_sad_g = UINT_MAX;
}
- vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
- &cm->frame_refs[LAST_FRAME - 1].sf);
+ av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
+ &cm->frame_refs[LAST_FRAME - 1].sf);
mbmi->ref_frame[0] = LAST_FRAME;
mbmi->ref_frame[1] = NONE;
mbmi->sb_type = cm->sb_size;
@@ -901,11 +899,11 @@
mbmi->interp_filter = BILINEAR;
#endif
- y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
+ y_sad = av1_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
if (y_sad_g < y_sad) {
- vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
- &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+ av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+ &cm->frame_refs[GOLDEN_FRAME - 1].sf);
mbmi->ref_frame[0] = GOLDEN_FRAME;
mbmi->mv[0].as_int = 0;
y_sad = y_sad_g;
@@ -913,7 +911,7 @@
x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
}
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, cm->sb_size);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, cm->sb_size);
for (i = 1; i < MAX_MB_PLANE; ++i) {
struct macroblock_plane *p = &x->plane[i];
@@ -941,25 +939,25 @@
}
}
} else {
- ref = VP10_VAR_OFFS;
+ ref = AV1_VAR_OFFS;
ref_stride = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (xd->bd) {
- case 10: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_10); break;
- case 12: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_12); break;
+ case 10: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_10); break;
+ case 12: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_12); break;
case 8:
- default: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_8); break;
+ default: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_8); break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
init_variance_tree(
vt,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH,
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
cm->sb_size, (is_key_frame || low_res) ? BLOCK_4X4 : BLOCK_8X8,
pixels_wide, pixels_high, src, src_stride, ref, ref_stride);
@@ -984,7 +982,7 @@
}
#if CONFIG_DUAL_FILTER
-static void reset_intmv_filter_type(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void reset_intmv_filter_type(AV1_COMMON *cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi) {
int dir;
for (dir = 0; dir < 2; ++dir) {
@@ -1006,7 +1004,7 @@
if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
(mbmi->ref_frame[1] > INTRA_FRAME &&
has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
++counts->switchable_interp[ctx][mbmi->interp_filter[dir]];
}
}
@@ -1014,8 +1012,7 @@
#endif
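// Counts, per reference frame, how often ZEROMV (the global motion
// candidate under CONFIG_GLOBAL_MOTION) is selected.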
#if CONFIG_GLOBAL_MOTION
static void update_global_motion_used(PREDICTION_MODE mode,
- const MB_MODE_INFO *mbmi,
- VP10_COMP *cpi) {
+ const MB_MODE_INFO *mbmi, AV1_COMP *cpi) {
if (mode == ZEROMV) {
++cpi->global_motion_used[mbmi->ref_frame[0]];
if (has_second_ref(mbmi)) ++cpi->global_motion_used[mbmi->ref_frame[1]];
@@ -1023,11 +1020,11 @@
}
#endif // CONFIG_GLOBAL_MOTION
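// Commits the mode decision held in ctx to the frame-level mode-info grid
// and refreshes the dependent state: segment map, quantizers, motion vector
// and interpolation filter counts.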
-static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
+static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, BLOCK_SIZE bsize,
int output_enabled) {
int i, x_idx, y;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RD_COUNTS *const rdc = &td->rd_counts;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1039,8 +1036,8 @@
const struct segmentation *const seg = &cm->seg;
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
- const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
- const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
@@ -1065,7 +1062,7 @@
#endif
#if CONFIG_REF_MV
- rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+ rf_type = av1_ref_frame_type(mbmi->ref_frame);
if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && mbmi->sb_type >= BLOCK_8X8 &&
mbmi->mode == NEWMV) {
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
@@ -1091,8 +1088,8 @@
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
- bsize, ctx->rate, ctx->dist, x->skip);
+ av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+ bsize, ctx->rate, ctx->dist, x->skip);
}
}
@@ -1123,7 +1120,7 @@
}
if (cpi->oxcf.aq_mode)
- vp10_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
+ av1_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -1157,7 +1154,7 @@
#endif
if (!frame_is_intra_only(cm)) {
if (is_inter_block(mbmi)) {
- vp10_update_mv_count(td);
+ av1_update_mv_count(td);
#if CONFIG_GLOBAL_MOTION
if (bsize >= BLOCK_8X8) {
update_global_motion_used(mbmi->mode, mbmi, cpi);
@@ -1175,13 +1172,13 @@
#endif // CONFIG_GLOBAL_MOTION
if (cm->interp_filter == SWITCHABLE
#if CONFIG_EXT_INTERP
- && vp10_is_interp_needed(xd)
+ && av1_is_interp_needed(xd)
#endif
) {
#if CONFIG_DUAL_FILTER
update_filter_type_count(td->counts, xd, mbmi);
#else
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
++td->counts->switchable_interp[ctx][mbmi->interp_filter];
#endif
}
@@ -1205,14 +1202,14 @@
}
#if CONFIG_SUPERTX
-static void update_state_supertx(VP10_COMP *cpi, ThreadData *td,
+static void update_state_supertx(AV1_COMP *cpi, ThreadData *td,
PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
BLOCK_SIZE bsize, int output_enabled) {
int y, x_idx;
#if CONFIG_VAR_TX || CONFIG_REF_MV
int i;
#endif
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RD_COUNTS *const rdc = &td->rd_counts;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1223,8 +1220,8 @@
const int mis = cm->mi_stride;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
- const int x_mis = VPXMIN(mi_width, cm->mi_cols - mi_col);
- const int y_mis = VPXMIN(mi_height, cm->mi_rows - mi_row);
+ const int x_mis = AOMMIN(mi_width, cm->mi_cols - mi_col);
+ const int y_mis = AOMMIN(mi_height, cm->mi_rows - mi_row);
MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
@@ -1242,7 +1239,7 @@
#endif
#if CONFIG_REF_MV
- rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+ rf_type = av1_ref_frame_type(mbmi->ref_frame);
if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && mbmi->sb_type >= BLOCK_8X8 &&
mbmi->mode == NEWMV) {
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
@@ -1261,15 +1258,14 @@
// If segmentation in use
if (seg->enabled) {
if (cpi->vaq_refresh) {
- const int energy = bsize <= BLOCK_16X16
- ? x->mb_energy
- : vp10_block_energy(cpi, x, bsize);
- mi_addr->mbmi.segment_id = vp10_vaq_segment_id(energy);
+ const int energy =
+ bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize);
+ mi_addr->mbmi.segment_id = av1_vaq_segment_id(energy);
} else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
// For cyclic refresh mode, now update the segment map
// and set the segment id.
- vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
- bsize, ctx->rate, ctx->dist, 1);
+ av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+ bsize, ctx->rate, ctx->dist, 1);
} else {
// Otherwise just set the segment id based on the current segment map
const uint8_t *const map =
@@ -1316,17 +1312,17 @@
if (!output_enabled) return;
if (!frame_is_intra_only(cm)) {
- vp10_update_mv_count(td);
+ av1_update_mv_count(td);
if (cm->interp_filter == SWITCHABLE
#if CONFIG_EXT_INTERP
- && vp10_is_interp_needed(xd)
+ && av1_is_interp_needed(xd)
#endif
) {
#if CONFIG_DUAL_FILTER
update_filter_type_count(td->counts, xd, mbmi);
#else
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
++td->counts->switchable_interp[ctx][mbmi->interp_filter];
#endif
}
@@ -1348,11 +1344,11 @@
}
}
-static void update_state_sb_supertx(VP10_COMP *cpi, ThreadData *td,
+static void update_state_sb_supertx(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int output_enabled, PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *const p = x->plane;
@@ -1369,7 +1365,7 @@
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
if (bsize == BLOCK_16X16 && cpi->vaq_refresh)
- x->mb_energy = vp10_block_energy(cpi, x, bsize);
+ x->mb_energy = av1_block_energy(cpi, x, bsize);
switch (partition) {
case PARTITION_NONE:
@@ -1504,10 +1500,10 @@
ctx->mic.mbmi.tx_type = best_tx;
}
-static void update_supertx_param_sb(VP10_COMP *cpi, ThreadData *td, int mi_row,
+static void update_supertx_param_sb(AV1_COMP *cpi, ThreadData *td, int mi_row,
int mi_col, BLOCK_SIZE bsize, int best_tx,
TX_SIZE supertx_size, PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
PARTITION_TYPE partition = pc_tree->partitioning;
BLOCK_SIZE subsize = get_subsize(bsize, partition);
@@ -1571,8 +1567,8 @@
}
#endif // CONFIG_SUPERTX
-void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col) {
+void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col) {
uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
const int widths[3] = { src->y_crop_width, src->uv_crop_width,
src->uv_crop_width };
@@ -1591,17 +1587,17 @@
x->e_mbd.plane[i].subsampling_y);
}
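// Re-initializes the plane quantizers for the given segment and converts its
// qindex (plus the luma DC delta) into the corresponding rd multiplier.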
-static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+static int set_segment_rdmult(AV1_COMP *const cpi, MACROBLOCK *const x,
int8_t segment_id) {
int segment_qindex;
- VP10_COMMON *const cm = &cpi->common;
- vp10_init_plane_quantizers(cpi, x, segment_id);
- vpx_clear_system_state();
- segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
- return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
+ AV1_COMMON *const cm = &cpi->common;
+ av1_init_plane_quantizers(cpi, x, segment_id);
+ aom_clear_system_state();
+ segment_qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ return av1_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
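// Mode decision for a single block: sets up segment-aware rdmult and source
// variance, then picks the best intra or inter mode subject to best_rd.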
-static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+static void rd_pick_sb_modes(AV1_COMP *cpi, TileDataEnc *tile_data,
MACROBLOCK *const x, int mi_row, int mi_col,
RD_COST *rd_cost,
#if CONFIG_SUPERTX
@@ -1612,7 +1608,7 @@
#endif
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
@@ -1621,7 +1617,7 @@
const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
int i, orig_rdmult;
- vpx_clear_system_state();
+ aom_clear_system_state();
// Use the lower precision, but faster, 32x32 fdct for mode selection.
x->use_lp32x32fdct = 1;
@@ -1657,30 +1653,29 @@
// Set to zero to make sure we do not use the previous encoded frame stats
mbmi->skip = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->source_variance = vp10_high_get_sby_perpixel_variance(
+ x->source_variance = av1_high_get_sby_perpixel_variance(
cpi, &x->plane[0].src, bsize, xd->bd);
} else {
x->source_variance =
- vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+ av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
}
#else
x->source_variance =
- vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Save rdmult before it might be changed, so it can be restored later.
orig_rdmult = x->rdmult;
if (aq_mode == VARIANCE_AQ) {
if (cpi->vaq_refresh) {
- const int energy = bsize <= BLOCK_16X16
- ? x->mb_energy
- : vp10_block_energy(cpi, x, bsize);
- mbmi->segment_id = vp10_vaq_segment_id(energy);
+ const int energy =
+ bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize);
+ mbmi->segment_id = av1_vaq_segment_id(energy);
// Re-initialise quantiser
- vp10_init_plane_quantizers(cpi, x, mbmi->segment_id);
+ av1_init_plane_quantizers(cpi, x, mbmi->segment_id);
x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
}
x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
@@ -1689,30 +1684,30 @@
} else if (aq_mode == CYCLIC_REFRESH_AQ) {
// If segment is boosted, use rdmult for that segment.
if (cyclic_refresh_segment_id_boosted(mbmi->segment_id))
- x->rdmult = vp10_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
+ x->rdmult = av1_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
}
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
if (frame_is_intra_only(cm)) {
- vp10_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+ av1_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
#if CONFIG_SUPERTX
*totalrate_nocoef = 0;
#endif // CONFIG_SUPERTX
} else {
if (bsize >= BLOCK_8X8) {
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
- ctx, best_rd);
+ av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
+ ctx, best_rd);
#if CONFIG_SUPERTX
*totalrate_nocoef = rd_cost->rate;
#endif // CONFIG_SUPERTX
} else {
- vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ av1_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
#if CONFIG_SUPERTX
- totalrate_nocoef,
+ totalrate_nocoef,
#endif // CONFIG_SUPERTX
- bsize, ctx, best_rd);
+ bsize, ctx, best_rd);
#if CONFIG_SUPERTX
assert(*totalrate_nocoef >= 0);
#endif // CONFIG_SUPERTX
@@ -1722,12 +1717,12 @@
// The decoder rejects sub8x8 partitions when SEG_LVL_SKIP is set.
rd_cost->rate = INT_MAX;
} else {
- vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
- rd_cost,
+ av1_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
+ rd_cost,
#if CONFIG_SUPERTX
- totalrate_nocoef,
+ totalrate_nocoef,
#endif // CONFIG_SUPERTX
- bsize, ctx, best_rd);
+ bsize, ctx, best_rd);
#if CONFIG_SUPERTX
assert(*totalrate_nocoef >= 0);
#endif // CONFIG_SUPERTX
@@ -1740,7 +1735,7 @@
(bsize >= BLOCK_16X16) &&
(cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
- vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
+ av1_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
}
x->rdmult = orig_rdmult;
@@ -1793,7 +1788,7 @@
}
#endif
-static void update_stats(VP10_COMMON *cm, ThreadData *td
+static void update_stats(AV1_COMMON *cm, ThreadData *td
#if CONFIG_SUPERTX
,
int supertx_enabled
@@ -1815,7 +1810,7 @@
#if CONFIG_SUPERTX
if (!supertx_enabled)
#endif
- counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
+ counts->intra_inter[av1_get_intra_inter_context(xd)][inter_block]++;
// If the segment reference feature is enabled we have only a single
// reference frame allowed for the segment so exclude it from
// the reference frame counts used to work out probabilities.
@@ -1826,53 +1821,53 @@
#endif // CONFIG_EXT_REFS
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- counts->comp_inter[vp10_get_reference_mode_context(
+ counts->comp_inter[av1_get_reference_mode_context(
cm, xd)][has_second_ref(mbmi)]++;
if (has_second_ref(mbmi)) {
#if CONFIG_EXT_REFS
const int bit = (ref0 == GOLDEN_FRAME || ref0 == LAST3_FRAME);
- counts->comp_ref[vp10_get_pred_context_comp_ref_p(cm, xd)][0][bit]++;
+ counts->comp_ref[av1_get_pred_context_comp_ref_p(cm, xd)][0][bit]++;
if (!bit) {
- counts->comp_ref[vp10_get_pred_context_comp_ref_p1(
+ counts->comp_ref[av1_get_pred_context_comp_ref_p1(
cm, xd)][1][ref0 == LAST_FRAME]++;
} else {
- counts->comp_ref[vp10_get_pred_context_comp_ref_p2(
+ counts->comp_ref[av1_get_pred_context_comp_ref_p2(
cm, xd)][2][ref0 == GOLDEN_FRAME]++;
}
- counts->comp_bwdref[vp10_get_pred_context_comp_bwdref_p(
+ counts->comp_bwdref[av1_get_pred_context_comp_bwdref_p(
cm, xd)][0][ref1 == ALTREF_FRAME]++;
#else
- counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+ counts->comp_ref[av1_get_pred_context_comp_ref_p(
cm, xd)][0][ref0 == GOLDEN_FRAME]++;
#endif // CONFIG_EXT_REFS
} else {
#if CONFIG_EXT_REFS
const int bit = (ref0 == ALTREF_FRAME || ref0 == BWDREF_FRAME);
- counts->single_ref[vp10_get_pred_context_single_ref_p1(xd)][0][bit]++;
+ counts->single_ref[av1_get_pred_context_single_ref_p1(xd)][0][bit]++;
if (bit) {
- counts->single_ref[vp10_get_pred_context_single_ref_p2(
+ counts->single_ref[av1_get_pred_context_single_ref_p2(
xd)][1][ref0 != BWDREF_FRAME]++;
} else {
const int bit1 = !(ref0 == LAST2_FRAME || ref0 == LAST_FRAME);
- counts->single_ref[vp10_get_pred_context_single_ref_p3(
- xd)][2][bit1]++;
+ counts
+ ->single_ref[av1_get_pred_context_single_ref_p3(xd)][2][bit1]++;
if (!bit1) {
- counts->single_ref[vp10_get_pred_context_single_ref_p4(
+ counts->single_ref[av1_get_pred_context_single_ref_p4(
xd)][3][ref0 != LAST_FRAME]++;
} else {
- counts->single_ref[vp10_get_pred_context_single_ref_p5(
+ counts->single_ref[av1_get_pred_context_single_ref_p5(
xd)][4][ref0 != LAST3_FRAME]++;
}
}
#else
- counts->single_ref[vp10_get_pred_context_single_ref_p1(
+ counts->single_ref[av1_get_pred_context_single_ref_p1(
xd)][0][ref0 != LAST_FRAME]++;
if (ref0 != LAST_FRAME) {
- counts->single_ref[vp10_get_pred_context_single_ref_p2(
+ counts->single_ref[av1_get_pred_context_single_ref_p2(
xd)][1][ref0 != GOLDEN_FRAME]++;
}
#endif // CONFIG_EXT_REFS
@@ -1933,8 +1928,8 @@
++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(mode)];
} else {
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, -1);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, -1);
update_inter_mode_stats(counts, mode,
#if CONFIG_EXT_INTER
has_second_ref(mbmi),
@@ -1942,13 +1937,13 @@
mode_ctx);
if (mode == NEWMV) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
int idx;
for (idx = 0; idx < 2; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
++counts->drl_mode[drl_ctx][mbmi->ref_mv_idx != idx];
if (mbmi->ref_mv_idx == idx) break;
@@ -1957,13 +1952,13 @@
}
if (mode == NEARMV) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
int idx;
for (idx = 1; idx < 3; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
++counts->drl_mode[drl_ctx][mbmi->ref_mv_idx != idx - 1];
if (mbmi->ref_mv_idx == idx - 1) break;
@@ -1997,8 +1992,8 @@
b_mode)];
} else {
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, j);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, j);
update_inter_mode_stats(counts, b_mode,
#if CONFIG_EXT_INTER
has_second_ref(mbmi),
@@ -2105,7 +2100,7 @@
#endif
}
-static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
+static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
#if CONFIG_EXT_PARTITION_TYPES
@@ -2129,11 +2124,10 @@
}
}
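// Recursively encodes a superblock following the partitioning recorded in
// pc_tree, taking the supertx path when it is enabled and selected.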
-static void encode_sb(VP10_COMP *cpi, ThreadData *td,
- const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
- int mi_col, int output_enabled, BLOCK_SIZE bsize,
- PC_TREE *pc_tree) {
- const VP10_COMMON *const cm = &cpi->common;
+static void encode_sb(AV1_COMP *cpi, ThreadData *td, const TileInfo *const tile,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
+ int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2168,8 +2162,7 @@
update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize,
output_enabled, pc_tree);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
- mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
for (i = 0; i < MAX_MB_PLANE; i++) {
dst_buf[i] = xd->plane[i].dst.buf;
dst_stride[i] = xd->plane[i].dst.stride;
@@ -2185,11 +2178,11 @@
x->skip_optimize = 0;
x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
- vp10_encode_sb_supertx(x, bsize);
- vp10_tokenize_sb_supertx(cpi, td, tp, !output_enabled, bsize);
+ av1_encode_sb_supertx(x, bsize);
+ av1_tokenize_sb_supertx(cpi, td, tp, !output_enabled, bsize);
} else {
xd->mi[0]->mbmi.skip = 1;
- if (output_enabled) td->counts->skip[vp10_get_skip_context(xd)][1]++;
+ if (output_enabled) td->counts->skip[av1_get_skip_context(xd)][1]++;
reset_skip_context(xd, bsize);
}
if (output_enabled) {
@@ -2349,7 +2342,7 @@
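// Shrinks bsize step by step until the corresponding partition fits within
// the rows/columns remaining at the image border.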
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
int cols_left, int *bh, int *bw) {
if (rows_left <= 0 || cols_left <= 0) {
- return VPXMIN(bsize, BLOCK_8X8);
+ return AOMMIN(bsize, BLOCK_8X8);
} else {
for (; bsize > 0; bsize -= 3) {
*bh = num_8x8_blocks_high_lookup[bsize];
@@ -2362,7 +2355,7 @@
return bsize;
}
-static void set_partial_sb_partition(const VP10_COMMON *const cm, MODE_INFO *mi,
+static void set_partial_sb_partition(const AV1_COMMON *const cm, MODE_INFO *mi,
int bh_in, int bw_in,
int mi_rows_remaining,
int mi_cols_remaining, BLOCK_SIZE bsize,
@@ -2385,10 +2378,10 @@
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
-static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_fixed_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
MODE_INFO **mib, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int mi_rows_remaining = tile->mi_row_end - mi_row;
const int mi_cols_remaining = tile->mi_col_end - mi_col;
int block_row, block_col;
@@ -2415,7 +2408,7 @@
}
}
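// Re-evaluates a previously chosen partitioning: measures its rd cost and,
// when partition search is enabled, compares it against PARTITION_NONE and a
// recursive split before committing the cheapest option.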
-static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, MODE_INFO **mib,
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate, int64_t *dist,
@@ -2423,7 +2416,7 @@
int *rate_nocoef,
#endif
int do_recon, PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2451,9 +2444,9 @@
assert(num_4x4_blocks_wide_lookup[bsize] ==
num_4x4_blocks_high_lookup[bsize]);
- vp10_rd_cost_reset(&last_part_rdc);
- vp10_rd_cost_reset(&none_rdc);
- vp10_rd_cost_reset(&chosen_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&none_rdc);
+ av1_rd_cost_reset(&chosen_rdc);
pc_tree->partitioning = partition;
@@ -2467,7 +2460,7 @@
if (bsize == BLOCK_16X16 && cpi->vaq_refresh) {
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
- x->mb_energy = vp10_block_energy(cpi, x, bsize);
+ x->mb_energy = av1_block_energy(cpi, x, bsize);
}
if (do_partition_search &&
@@ -2543,7 +2536,7 @@
int rt_nocoef = 0;
#endif
PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
- vp10_rd_cost_init(&tmp_rdc);
+ av1_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
rd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, &tmp_rdc,
@@ -2555,7 +2548,7 @@
#endif
subsize, &pc_tree->horizontal[1], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
#if CONFIG_SUPERTX
last_part_rate_nocoef = INT_MAX;
#endif
@@ -2585,7 +2578,7 @@
int rt_nocoef = 0;
#endif
PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
- vp10_rd_cost_init(&tmp_rdc);
+ av1_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, &tmp_rdc,
@@ -2598,7 +2591,7 @@
subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
#if CONFIG_SUPERTX
last_part_rate_nocoef = INT_MAX;
#endif
@@ -2641,7 +2634,7 @@
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
- vp10_rd_cost_init(&tmp_rdc);
+ av1_rd_cost_init(&tmp_rdc);
rd_use_partition(cpi, td, tile_data,
mib + jj * hbs * cm->mi_stride + ii * hbs, tp,
mi_row + y_idx, mi_col + x_idx, subsize, &tmp_rdc.rate,
@@ -2651,7 +2644,7 @@
#endif
i != 3, pc_tree->split[i]);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
#if CONFIG_SUPERTX
last_part_rate_nocoef = INT_MAX;
#endif
@@ -2726,7 +2719,7 @@
restore_context(x, &x_ctx, mi_row, mi_col, bsize);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&chosen_rdc);
+ av1_rd_cost_reset(&chosen_rdc);
#if CONFIG_SUPERTX
chosen_rate_nocoef = INT_MAX;
#endif
@@ -2840,7 +2833,7 @@
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one
// superblock.
-static void get_sb_partition_size_range(const VP10_COMMON *const cm,
+static void get_sb_partition_size_range(const AV1_COMMON *const cm,
MACROBLOCKD *xd, MODE_INFO **mib,
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
@@ -2852,8 +2845,8 @@
for (j = 0; j < cm->mib_size; ++j) {
MODE_INFO *mi = mib[index + j];
BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : BLOCK_4X4;
- *min_block_size = VPXMIN(*min_block_size, sb_type);
- *max_block_size = VPXMAX(*max_block_size, sb_type);
+ *min_block_size = AOMMIN(*min_block_size, sb_type);
+ *max_block_size = AOMMAX(*max_block_size, sb_type);
}
index += xd->mi_stride;
}
@@ -2861,11 +2854,11 @@
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
-static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
+static void rd_auto_partition_range(AV1_COMP *cpi, const TileInfo *const tile,
MACROBLOCKD *const xd, int mi_row,
int mi_col, BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MODE_INFO **mi = xd->mi;
const int left_in_image = xd->left_available && mi[-1];
const int above_in_image = xd->up_available && mi[-xd->mi_stride];
@@ -2910,30 +2903,30 @@
// Check border cases where max and min from neighbors may not be legal.
max_size = find_partition_size(max_size, mi_rows_remaining, mi_cols_remaining,
&bh, &bw);
- min_size = VPXMIN(min_size, max_size);
+ min_size = AOMMIN(min_size, max_size);
// Test for blocks at the edge of the active image.
// This may be the actual edge of the image or where there are formatting
// bars.
- if (vp10_active_edge_sb(cpi, mi_row, mi_col)) {
+ if (av1_active_edge_sb(cpi, mi_row, mi_col)) {
min_size = BLOCK_4X4;
} else {
- min_size = VPXMIN(cpi->sf.rd_auto_partition_min_limit, min_size);
+ min_size = AOMMIN(cpi->sf.rd_auto_partition_min_limit, min_size);
}
// When use_square_partition_only is true, make sure at least one square
// partition is allowed by selecting the next smaller square size as
// *min_block_size.
if (cpi->sf.use_square_partition_only) {
- min_size = VPXMIN(min_size, next_square_size[max_size]);
+ min_size = AOMMIN(min_size, next_square_size[max_size]);
}
- *min_block_size = VPXMIN(min_size, cm->sb_size);
- *max_block_size = VPXMIN(max_size, cm->sb_size);
+ *min_block_size = AOMMIN(min_size, cm->sb_size);
+ *max_block_size = AOMMIN(max_size, cm->sb_size);
}
// TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
int mi_col, BLOCK_SIZE bsize,
BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -2953,8 +2946,8 @@
for (idx = 0; idx < mi_width; ++idx) {
mi = prev_mi[idy * cm->mi_stride + idx];
bs = mi ? mi->mbmi.sb_type : bsize;
- min_size = VPXMIN(min_size, bs);
- max_size = VPXMAX(max_size, bs);
+ min_size = AOMMIN(min_size, bs);
+ max_size = AOMMAX(max_size, bs);
}
}
}
@@ -2963,8 +2956,8 @@
for (idy = 0; idy < mi_height; ++idy) {
mi = xd->mi[idy * cm->mi_stride - 1];
bs = mi ? mi->mbmi.sb_type : bsize;
- min_size = VPXMIN(min_size, bs);
- max_size = VPXMAX(max_size, bs);
+ min_size = AOMMIN(min_size, bs);
+ max_size = AOMMAX(max_size, bs);
}
}
@@ -2972,8 +2965,8 @@
for (idx = 0; idx < mi_width; ++idx) {
mi = xd->mi[idx - cm->mi_stride];
bs = mi ? mi->mbmi.sb_type : bsize;
- min_size = VPXMIN(min_size, bs);
- max_size = VPXMAX(max_size, bs);
+ min_size = AOMMIN(min_size, bs);
+ max_size = AOMMAX(max_size, bs);
}
}
@@ -2982,8 +2975,8 @@
max_size = max_partition_size[max_size];
}
- *min_bs = VPXMIN(min_size, cm->sb_size);
- *max_bs = VPXMIN(max_size, cm->sb_size);
+ *min_bs = AOMMIN(min_size, cm->sb_size);
+ *max_bs = AOMMIN(max_size, cm->sb_size);
}
static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
@@ -3094,7 +3087,7 @@
#if CONFIG_EXT_PARTITION_TYPES
static void rd_test_partition3(
- VP10_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp,
+ AV1_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp,
PC_TREE *pc_tree, RD_COST *best_rdc, PICK_MODE_CONTEXT ctxs[3],
PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, BLOCK_SIZE bsize,
PARTITION_TYPE partition,
@@ -3107,7 +3100,7 @@
MACROBLOCKD *const xd = &x->e_mbd;
RD_COST this_rdc, sum_rdc;
#if CONFIG_SUPERTX
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
int this_rate_nocoef, sum_rate_nocoef;
int abort_flag;
@@ -3214,7 +3207,7 @@
TX_SIZE supertx_size = max_txsize_lookup[bsize];
const PARTITION_TYPE best_partition = pc_tree->partitioning;
pc_tree->partitioning = partition;
- sum_rdc.rate += vp10_cost_bit(
+ sum_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
[supertx_size],
0);
@@ -3230,7 +3223,7 @@
rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
&tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
- tmp_rdc.rate += vp10_cost_bit(
+ tmp_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
[supertx_size],
1);
@@ -3272,7 +3265,7 @@
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
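// Recursive rd-based partition search: evaluates NONE, HORZ, VERT and SPLIT
// (adding the supertx signalling cost when enabled) and keeps the best.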
-static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, TOKENEXTRA **tp,
int mi_row, int mi_col, BLOCK_SIZE bsize,
RD_COST *rd_cost,
@@ -3280,7 +3273,7 @@
int *rate_nocoef,
#endif
int64_t best_rd, PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3334,15 +3327,15 @@
if (!force_vert_split) { // force_horz_split only
tmp_partition_cost[PARTITION_VERT] = INT_MAX;
tmp_partition_cost[PARTITION_HORZ] =
- vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 0);
+ av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 0);
tmp_partition_cost[PARTITION_SPLIT] =
- vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 1);
+ av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 1);
} else if (!force_horz_split) { // force_vert_split only
tmp_partition_cost[PARTITION_HORZ] = INT_MAX;
tmp_partition_cost[PARTITION_VERT] =
- vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 0);
+ av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 0);
tmp_partition_cost[PARTITION_SPLIT] =
- vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 1);
+ av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 1);
} else {  // force_horz_split && force_vert_split
tmp_partition_cost[PARTITION_HORZ] = INT_MAX;
tmp_partition_cost[PARTITION_VERT] = INT_MAX;
@@ -3364,15 +3357,15 @@
assert(num_8x8_blocks_wide_lookup[bsize] ==
num_8x8_blocks_high_lookup[bsize]);
- vp10_rd_cost_init(&this_rdc);
- vp10_rd_cost_init(&sum_rdc);
- vp10_rd_cost_reset(&best_rdc);
+ av1_rd_cost_init(&this_rdc);
+ av1_rd_cost_init(&sum_rdc);
+ av1_rd_cost_reset(&best_rdc);
best_rdc.rdcost = best_rd;
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
if (bsize == BLOCK_16X16 && cpi->vaq_refresh)
- x->mb_energy = vp10_block_energy(cpi, x, bsize);
+ x->mb_energy = av1_block_energy(cpi, x, bsize);
if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
int cb_partition_search_ctrl =
@@ -3424,9 +3417,9 @@
int mb_row = mi_row >> 1;
int mb_col = mi_col >> 1;
int mb_row_end =
- VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
+ AOMMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
int mb_col_end =
- VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
+ AOMMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
int r, c;
// compute a complexity measure, basically measure inconsistency of motion
@@ -3527,9 +3520,9 @@
int mb_row = mi_row >> 1;
int mb_col = mi_col >> 1;
int mb_row_end =
- VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
+ AOMMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
int mb_col_end =
- VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
+ AOMMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
int r, c;
int skip = 1;
@@ -3613,7 +3606,7 @@
pc_tree->partitioning = PARTITION_SPLIT;
- sum_rdc.rate += vp10_cost_bit(
+ sum_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup
[PARTITION_SPLIT]][supertx_size],
0);
@@ -3629,7 +3622,7 @@
rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
&tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
- tmp_rdc.rate += vp10_cost_bit(
+ tmp_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup
[PARTITION_SPLIT]][supertx_size],
1);
@@ -3692,7 +3685,7 @@
pc_tree->partitioning = PARTITION_SPLIT;
- sum_rdc.rate += vp10_cost_bit(
+ sum_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup
[PARTITION_SPLIT]][supertx_size],
0);
@@ -3708,7 +3701,7 @@
rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
&tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
- tmp_rdc.rate += vp10_cost_bit(
+ tmp_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup
[PARTITION_SPLIT]][supertx_size],
1);
@@ -3752,7 +3745,7 @@
// PARTITION_HORZ
if (partition_horz_allowed &&
- (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
+ (do_rect || av1_active_h_edge(cpi, mi_row, mi_step))) {
subsize = get_subsize(bsize, PARTITION_HORZ);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
#if CONFIG_DUAL_FILTER
@@ -3835,7 +3828,7 @@
pc_tree->partitioning = PARTITION_HORZ;
- sum_rdc.rate += vp10_cost_bit(
+ sum_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]]
[supertx_size],
0);
@@ -3850,7 +3843,7 @@
rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate,
&tmp_rdc.dist, &best_tx, pc_tree);
- tmp_rdc.rate += vp10_cost_bit(
+ tmp_rdc.rate += av1_cost_bit(
cm->fc
->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]]
[supertx_size],
@@ -3889,7 +3882,7 @@
// PARTITION_VERT
if (partition_vert_allowed &&
- (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
+ (do_rect || av1_active_v_edge(cpi, mi_col, mi_step))) {
subsize = get_subsize(bsize, PARTITION_VERT);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
@@ -3972,7 +3965,7 @@
pc_tree->partitioning = PARTITION_VERT;
- sum_rdc.rate += vp10_cost_bit(
+ sum_rdc.rate += av1_cost_bit(
cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]]
[supertx_size],
0);
@@ -3987,7 +3980,7 @@
rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate,
&tmp_rdc.dist, &best_tx, pc_tree);
- tmp_rdc.rate += vp10_cost_bit(
+ tmp_rdc.rate += av1_cost_bit(
cm->fc
->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]]
[supertx_size],
@@ -4108,10 +4101,10 @@
}
}
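// Encodes one superblock row of a tile: resets the left context, then codes
// each superblock in the row with the configured partition search.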
-static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, int mi_row,
TOKENEXTRA **tp) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -4124,7 +4117,7 @@
#endif // CONFIG_EXT_PARTITION
// Initialize the left context for the new SB row
- vp10_zero_left_context(xd);
+ av1_zero_left_context(xd);
// Code each SB in the row
for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
@@ -4155,7 +4148,7 @@
}
}
- vp10_zero(x->pred_mv);
+ av1_zero(x->pred_mv);
pc_root->index = 0;
if (seg->enabled) {
@@ -4216,7 +4209,7 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
if ((mi_row + MI_SIZE) %
(MI_SIZE *
- VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+ AOMMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
0 &&
mi_row + MI_SIZE < cm->mi_rows &&
cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
@@ -4224,54 +4217,54 @@
SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
for (t = TX_4X4; t <= TX_32X32; ++t)
- vp10_full_to_model_counts(cpi->td.counts->coef[t],
- cpi->td.rd_counts.coef_counts[t]);
- vp10_partial_adapt_probs(cm, mi_row, mi_col);
+ av1_full_to_model_counts(cpi->td.counts->coef[t],
+ cpi->td.rd_counts.coef_counts[t]);
+ av1_partial_adapt_probs(cm, mi_row, mi_col);
++cm->coef_probs_update_idx;
- vp10_copy(subframe_stats->coef_probs_buf[cm->coef_probs_update_idx],
- cm->fc->coef_probs);
- vp10_copy(subframe_stats->coef_counts_buf[cm->coef_probs_update_idx],
- cpi->td.rd_counts.coef_counts);
- vp10_copy(subframe_stats->eob_counts_buf[cm->coef_probs_update_idx],
- cm->counts.eob_branch);
- vp10_fill_token_costs(x->token_costs,
+ av1_copy(subframe_stats->coef_probs_buf[cm->coef_probs_update_idx],
+ cm->fc->coef_probs);
+ av1_copy(subframe_stats->coef_counts_buf[cm->coef_probs_update_idx],
+ cpi->td.rd_counts.coef_counts);
+ av1_copy(subframe_stats->eob_counts_buf[cm->coef_probs_update_idx],
+ cm->counts.eob_branch);
+ av1_fill_token_costs(x->token_costs,
#if CONFIG_ANS
- cm->fc->coef_cdfs,
+ cm->fc->coef_cdfs,
#endif // CONFIG_ANS
- cm->fc->coef_probs);
+ cm->fc->coef_probs);
}
}
#endif // CONFIG_ENTROPY
}
-static void init_encode_frame_mb_context(VP10_COMP *cpi) {
+static void init_encode_frame_mb_context(AV1_COMP *cpi) {
MACROBLOCK *const x = &cpi->td.mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
// Copy data over into macro block data structures.
- vp10_setup_src_planes(x, cpi->Source, 0, 0);
+ av1_setup_src_planes(x, cpi->Source, 0, 0);
- vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
}
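// Returns 1 when at least two reference frames are enabled and segment-level
// reference masking is inactive, i.e. compound prediction is possible.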
-static int check_dual_ref_flags(VP10_COMP *cpi) {
+static int check_dual_ref_flags(AV1_COMP *cpi) {
const int ref_flags = cpi->ref_frame_flags;
if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
return 0;
} else {
- return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG) +
+ return (!!(ref_flags & AOM_GOLD_FLAG) + !!(ref_flags & AOM_LAST_FLAG) +
#if CONFIG_EXT_REFS
- !!(ref_flags & VPX_LAST2_FLAG) + !!(ref_flags & VPX_LAST3_FLAG) +
- !!(ref_flags & VPX_BWD_FLAG) +
+ !!(ref_flags & AOM_LAST2_FLAG) + !!(ref_flags & AOM_LAST3_FLAG) +
+ !!(ref_flags & AOM_BWD_FLAG) +
#endif // CONFIG_EXT_REFS
- !!(ref_flags & VPX_ALT_FLAG)) >= 2;
+ !!(ref_flags & AOM_ALT_FLAG)) >= 2;
}
}
#if !CONFIG_VAR_TX
-static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) {
+static void reset_skip_tx_size(AV1_COMMON *cm, TX_SIZE max_tx_size) {
int mi_row, mi_col;
const int mis = cm->mi_stride;
MODE_INFO **mi_ptr = cm->mi_grid_visible;
@@ -4285,7 +4278,7 @@
}
#endif
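// Classifies the frame being encoded as an INTRA, LAST, GOLDEN or ALTREF
// frame type.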
-static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
+static MV_REFERENCE_FRAME get_frame_type(const AV1_COMP *cpi) {
if (frame_is_intra_only(&cpi->common)) return INTRA_FRAME;
#if CONFIG_EXT_REFS
// We will not update the golden frame with an internal overlay frame
@@ -4303,7 +4296,7 @@
return LAST_FRAME;
}
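// Picks the frame-level transform size mode: lossless frames are forced to
// ONLY_4X4, otherwise the tx_size_search_method speed feature decides.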
-static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
+static TX_MODE select_tx_mode(const AV1_COMP *cpi, MACROBLOCKD *const xd) {
if (xd->lossless[0]) return ONLY_4X4;
if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
return ALLOW_32X32;
@@ -4314,8 +4307,8 @@
return cpi->common.tx_mode;
}
-void vp10_init_tile_data(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_init_tile_data(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
int tile_col, tile_row;
@@ -4323,8 +4316,8 @@
unsigned int tile_tok = 0;
if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
- if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
- CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
+ if (cpi->tile_data != NULL) aom_free(cpi->tile_data);
+ CHECK_MEM_ERROR(cm, cpi->tile_data, aom_malloc(tile_cols * tile_rows *
sizeof(*cpi->tile_data)));
cpi->allocated_tiles = tile_cols * tile_rows;
@@ -4346,7 +4339,7 @@
for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
TileInfo *const tile_info =
&cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
- vp10_tile_init(tile_info, cm, tile_row, tile_col);
+ av1_tile_init(tile_info, cm, tile_row, tile_col);
cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
pre_tok = cpi->tile_tok[tile_row][tile_col];
@@ -4355,16 +4348,16 @@
}
}
-void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
- int tile_col) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
+ int tile_col) {
+ AV1_COMMON *const cm = &cpi->common;
TileDataEnc *const this_tile =
&cpi->tile_data[tile_row * cm->tile_cols + tile_col];
const TileInfo *const tile_info = &this_tile->tile_info;
TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
int mi_row;
- vp10_zero_above_context(cm, tile_info->mi_col_start, tile_info->mi_col_end);
+ av1_zero_above_context(cm, tile_info->mi_col_start, tile_info->mi_col_end);
// Set up pointers to per thread motion search counters.
td->mb.m_search_count_ptr = &td->rd_counts.m_search_count;
@@ -4380,20 +4373,20 @@
assert(cpi->tok_count[tile_row][tile_col] <= allocated_tokens(*tile_info));
}
-static void encode_tiles(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void encode_tiles(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int tile_col, tile_row;
- vp10_init_tile_data(cpi);
+ av1_init_tile_data(cpi);
for (tile_row = 0; tile_row < cm->tile_rows; ++tile_row)
for (tile_col = 0; tile_col < cm->tile_cols; ++tile_col)
- vp10_encode_tile(cpi, &cpi->td, tile_row, tile_col);
+ av1_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}
#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
- VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
+ AV1_COMMON *cm, uint8_t **this_frame_mb_stats) {
uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
cm->current_video_frame * cm->MBs * sizeof(uint8_t);
@@ -4452,29 +4445,29 @@
}
#endif // CONFIG_GLOBAL_MOTION
-static void encode_frame_internal(VP10_COMP *cpi) {
+static void encode_frame_internal(AV1_COMP *cpi) {
ThreadData *const td = &cpi->td;
MACROBLOCK *const x = &td->mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
RD_COUNTS *const rdc = &cpi->td.rd_counts;
int i;
- x->min_partition_size = VPXMIN(x->min_partition_size, cm->sb_size);
- x->max_partition_size = VPXMIN(x->max_partition_size, cm->sb_size);
+ x->min_partition_size = AOMMIN(x->min_partition_size, cm->sb_size);
+ x->max_partition_size = AOMMIN(x->max_partition_size, cm->sb_size);
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
- vp10_zero(*td->counts);
- vp10_zero(rdc->coef_counts);
- vp10_zero(rdc->comp_pred_diff);
+ av1_zero(*td->counts);
+ av1_zero(rdc->coef_counts);
+ av1_zero(rdc->comp_pred_diff);
rdc->m_search_count = 0; // Count of motion search hits.
rdc->ex_search_count = 0; // Exhaustive mesh search hits.
#if CONFIG_GLOBAL_MOTION
- vpx_clear_system_state();
- vp10_zero(cpi->global_motion_used);
+ aom_clear_system_state();
+ av1_zero(cpi->global_motion_used);
if (cpi->common.frame_type == INTER_FRAME && cpi->Source) {
YV12_BUFFER_CONFIG *ref_buf;
int frame;
@@ -4488,11 +4481,11 @@
&cm->global_motion[frame]);
if (get_gmtype(&cm->global_motion[frame]) > GLOBAL_ZERO) {
// compute the advantage of using gm parameters over 0 motion
- double erroradvantage = vp10_warp_erroradv(
+ double erroradvantage = av1_warp_erroradv(
&cm->global_motion[frame].motion_params,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
ref_buf->y_buffer, ref_buf->y_width, ref_buf->y_height,
ref_buf->y_stride, cpi->Source->y_buffer, 0, 0,
cpi->Source->y_width, cpi->Source->y_height,
@@ -4510,7 +4503,7 @@
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = cm->seg.enabled
- ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
: cm->base_qindex;
xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -4519,10 +4512,10 @@
if (!cm->seg.enabled && xd->lossless[0]) x->optimize = 0;
cm->tx_mode = select_tx_mode(cpi, xd);
- vp10_frame_init_quantizer(cpi);
+ av1_frame_init_quantizer(cpi);
- vp10_initialize_rd_consts(cpi);
- vp10_initialize_me_consts(cpi, x, cm->base_qindex);
+ av1_initialize_rd_consts(cpi);
+ av1_initialize_me_consts(cpi, x, cm->base_qindex);
init_encode_frame_mb_context(cpi);
cm->use_prev_frame_mvs =
@@ -4553,17 +4546,17 @@
#if CONFIG_VAR_TX
#if CONFIG_REF_MV
- vp10_zero(x->blk_skip_drl);
+ av1_zero(x->blk_skip_drl);
#endif
#endif
if (cpi->sf.partition_search_type == VAR_BASED_PARTITION &&
cpi->td.var_root[0] == NULL)
- vp10_setup_var_tree(&cpi->common, &cpi->td);
+ av1_setup_var_tree(&cpi->common, &cpi->td);
{
- struct vpx_usec_timer emr_timer;
- vpx_usec_timer_start(&emr_timer);
+ struct aom_usec_timer emr_timer;
+ aom_usec_timer_start(&emr_timer);
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
@@ -4576,13 +4569,13 @@
// TODO(geza.lore): The multi-threaded encoder is not safe with more than
  // 1 tile row, as it uses the single above_context et al arrays from
// cpi->common
- if (VPXMIN(cpi->oxcf.max_threads, cm->tile_cols) > 1 && cm->tile_rows == 1)
- vp10_encode_tiles_mt(cpi);
+ if (AOMMIN(cpi->oxcf.max_threads, cm->tile_cols) > 1 && cm->tile_rows == 1)
+ av1_encode_tiles_mt(cpi);
else
encode_tiles(cpi);
- vpx_usec_timer_mark(&emr_timer);
- cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
+ aom_usec_timer_mark(&emr_timer);
+ cpi->time_encode_sb_row += aom_usec_timer_elapsed(&emr_timer);
}
#if 0
@@ -4591,8 +4584,8 @@
#endif
}
-void vp10_encode_frame(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_encode_frame(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
// In the longer term the encoder should be generalized to match the
// decoder such that we allow compound where one of the 3 buffers has a
@@ -4678,10 +4671,10 @@
if (comp_count_zero == 0) {
cm->reference_mode = SINGLE_REFERENCE;
- vp10_zero(counts->comp_inter);
+ av1_zero(counts->comp_inter);
} else if (single_count_zero == 0) {
cm->reference_mode = COMPOUND_REFERENCE;
- vp10_zero(counts->comp_inter);
+ av1_zero(counts->comp_inter);
}
}
@@ -4767,8 +4760,8 @@
const int bidx = idy * 2 + idx;
const PREDICTION_MODE bmode = mi->bmi[bidx].as_mode;
if (intraonly) {
- const PREDICTION_MODE a = vp10_above_block_mode(mi, above_mi, bidx);
- const PREDICTION_MODE l = vp10_left_block_mode(mi, left_mi, bidx);
+ const PREDICTION_MODE a = av1_above_block_mode(mi, above_mi, bidx);
+ const PREDICTION_MODE l = av1_left_block_mode(mi, left_mi, bidx);
++counts->kf_y_mode[a][l][bmode];
} else {
++counts->y_mode[0][bmode];
@@ -4776,8 +4769,8 @@
}
} else {
if (intraonly) {
- const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, 0);
- const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, 0);
+ const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, 0);
+ const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, 0);
++counts->kf_y_mode[above][left][y_mode];
} else {
++counts->y_mode[size_group_lookup[bsize]][y_mode];
@@ -4832,7 +4825,7 @@
}
}
-static void tx_partition_count_update(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void tx_partition_count_update(AV1_COMMON *cm, MACROBLOCKD *xd,
BLOCK_SIZE plane_bsize, int mi_row,
int mi_col, FRAME_COUNTS *td_counts) {
const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
@@ -4893,7 +4886,7 @@
}
}
-static void tx_partition_set_contexts(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void tx_partition_set_contexts(AV1_COMMON *cm, MACROBLOCKD *xd,
BLOCK_SIZE plane_bsize, int mi_row,
int mi_col) {
const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
@@ -4913,10 +4906,10 @@
}
#endif
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO **mi_8x8 = xd->mi;
@@ -4936,7 +4929,7 @@
int plane;
mbmi->skip = 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
- vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
+ av1_encode_intra_block_plane(x, AOMMAX(bsize, BLOCK_8X8), plane, 1);
if (output_enabled)
sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi,
frame_is_intra_only(cm));
@@ -4951,10 +4944,10 @@
++counts->ext_intra[1][mbmi->ext_intra_mode_info.use_ext_intra_mode[1]];
if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED) {
int p_angle;
- const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+ const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
p_angle =
mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle))
+ if (av1_is_intra_filter_switchable(p_angle))
++counts->intra_filter[intra_filter_ctx][mbmi->intra_filter];
}
}
@@ -4967,11 +4960,11 @@
xd->plane[plane].color_index_map[0];
        // TODO(huisu): this increases the use of token buffer. Needs stress
        // test to verify.
- vp10_tokenize_palette_sb(td, bsize, plane, t);
+ av1_tokenize_palette_sb(td, bsize, plane, t);
}
}
}
- vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+ av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
} else {
int ref;
const int is_compound = has_second_ref(mbmi);
@@ -4980,25 +4973,25 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
assert(cfg != NULL);
- vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
- &xd->block_refs[ref]->sf);
+ av1_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
+ &xd->block_refs[ref]->sf);
}
if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
- vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
- VPXMAX(bsize, BLOCK_8X8));
+ av1_build_inter_predictors_sby(xd, mi_row, mi_col,
+ AOMMAX(bsize, BLOCK_8X8));
- vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col,
- VPXMAX(bsize, BLOCK_8X8));
+ av1_build_inter_predictors_sbuv(xd, mi_row, mi_col,
+ AOMMAX(bsize, BLOCK_8X8));
#if CONFIG_OBMC
if (mbmi->motion_variation == OBMC_CAUSAL) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
@@ -5009,7 +5002,7 @@
assert(mbmi->sb_type >= BLOCK_8X8);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -5019,39 +5012,37 @@
dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst_buf1[0] = tmp_buf1;
dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
dst_buf2[0] = tmp_buf2;
dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
- dst_width1, dst_height1,
- dst_stride1);
- vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
- dst_width2, dst_height2, dst_stride2);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
- mi_col);
- vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
- dst_stride1, dst_buf2, dst_stride2);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_width1, dst_height1, dst_stride1);
+ av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+ dst_width2, dst_height2, dst_stride2);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1, dst_buf2, dst_stride2);
}
#endif // CONFIG_OBMC
- vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
+ av1_encode_sb(x, AOMMAX(bsize, BLOCK_8X8));
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (mbmi->tx_size >= TX_SIZES)
- vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+ av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
else
#endif
- vp10_tokenize_sb_inter(cpi, td, t, !output_enabled, mi_row, mi_col,
- VPXMAX(bsize, BLOCK_8X8));
+ av1_tokenize_sb_inter(cpi, td, t, !output_enabled, mi_row, mi_col,
+ AOMMAX(bsize, BLOCK_8X8));
#else
- vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+ av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
#endif
}
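Note on the OBMC branch of encode_superblock above: each aligned scratch allocation is carved into three per-plane pointers, and the allocation doubles (with pointers converted) when samples are stored as 16-bit. A minimal sketch of that layout, with illustrative constants and without libaom's CONVERT_TO_BYTEPTR wrapper:

#include <stddef.h>
#include <stdint.h>

#define MAX_MB_PLANE 3
#define MAX_SB_SQUARE (128 * 128) /* illustrative; tracks the build config */

static void carve_planes(uint8_t *tmp, uint8_t *dst[MAX_MB_PLANE],
                         int use_highbitdepth) {
  /* 16-bit samples need two bytes per pixel, so each plane slot doubles. */
  const size_t step =
      use_highbitdepth ? MAX_SB_SQUARE * sizeof(uint16_t) : MAX_SB_SQUARE;
  int p;
  for (p = 0; p < MAX_MB_PLANE; ++p) dst[p] = tmp + p * step;
  /* libaom additionally wraps the highbd pointers in CONVERT_TO_BYTEPTR();
   * omitted here to keep the sketch self-contained. */
}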
@@ -5136,14 +5127,14 @@
if (is_inter_block(mbmi))
#if CONFIG_EXT_TX && CONFIG_RECT_TX
{
- tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
+ tx_size = AOMMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
max_txsize_lookup[bsize]);
if (txsize_sqr_map[max_txsize_rect_lookup[bsize]] <= tx_size)
tx_size = max_txsize_rect_lookup[bsize];
if (xd->lossless[mbmi->segment_id]) tx_size = TX_4X4;
}
#else
- tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
+ tx_size = AOMMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
max_txsize_lookup[bsize]);
#endif
else
@@ -5163,10 +5154,9 @@
return 0;
}
-static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- PC_TREE *pc_tree) {
- const VP10_COMMON *const cm = &cpi->common;
+static int check_intra_sb(AV1_COMP *cpi, const TileInfo *const tile, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
+ const AV1_COMMON *const cm = &cpi->common;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
const PARTITION_TYPE partition = pc_tree->partitioning;
@@ -5274,7 +5264,7 @@
}
}
-static void predict_superblock(VP10_COMP *cpi, ThreadData *td,
+static void predict_superblock(AV1_COMP *cpi, ThreadData *td,
#if CONFIG_EXT_INTER
int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
@@ -5283,7 +5273,7 @@
// Used in supertx
// (mi_row_ori, mi_col_ori): location for mv
// (mi_row_pred, mi_col_pred, bsize_pred): region to predict
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi_8x8 = xd->mi[0];
@@ -5296,26 +5286,26 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
- vp10_setup_pre_planes(xd, ref, cfg, mi_row_pred, mi_col_pred,
- &xd->block_refs[ref]->sf);
+ av1_setup_pre_planes(xd, ref, cfg, mi_row_pred, mi_col_pred,
+ &xd->block_refs[ref]->sf);
}
if (!b_sub8x8)
- vp10_build_inter_predictors_sb_extend(xd,
+ av1_build_inter_predictors_sb_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, bsize_pred);
+ mi_row_pred, mi_col_pred, bsize_pred);
else
- vp10_build_inter_predictors_sb_sub8x8_extend(xd,
+ av1_build_inter_predictors_sb_sub8x8_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred,
- bsize_pred, block);
+ mi_row_pred, mi_col_pred,
+ bsize_pred, block);
}
-static void predict_b_extend(VP10_COMP *cpi, ThreadData *td,
+static void predict_b_extend(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int block,
int mi_row_ori, int mi_col_ori, int mi_row_pred,
int mi_col_pred, int mi_row_top, int mi_col_top,
@@ -5331,7 +5321,7 @@
// bextend: 1: region to predict is an extension of ori; 0: not
MACROBLOCK *const x = &td->mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int r = (mi_row_pred - mi_row_top) * MI_SIZE;
int c = (mi_col_pred - mi_col_top) * MI_SIZE;
@@ -5368,7 +5358,7 @@
if (output_enabled && !bextend) update_stats(&cpi->common, td, 1);
}
-static void extend_dir(VP10_COMP *cpi, ThreadData *td,
+static void extend_dir(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int block, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, int mi_row, int mi_col,
int mi_row_top, int mi_col_top, int output_enabled,
@@ -5440,7 +5430,7 @@
}
}
-static void extend_all(VP10_COMP *cpi, ThreadData *td,
+static void extend_all(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int block, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, int mi_row, int mi_col,
int mi_row_top, int mi_col_top, int output_enabled,
@@ -5472,13 +5462,13 @@
// then applied to the 2 masked predictions mentioned above in the vertical direction
// If the block is split into more than one level, at every stage, masked
// prediction is stored in dst_buf[] passed from higher level.
-static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
+static void predict_sb_complex(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
int output_enabled, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
int dst_stride[3], PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -5503,7 +5493,7 @@
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -5516,7 +5506,7 @@
dst_buf3[1] = CONVERT_TO_BYTEPTR(tmp_buf3 + MAX_TX_SQUARE * len);
dst_buf3[2] = CONVERT_TO_BYTEPTR(tmp_buf3 + 2 * MAX_TX_SQUARE * len);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst_buf1[0] = tmp_buf1;
dst_buf1[1] = tmp_buf1 + MAX_TX_SQUARE;
dst_buf1[2] = tmp_buf1 + 2 * MAX_TX_SQUARE;
@@ -5526,9 +5516,9 @@
dst_buf3[0] = tmp_buf3;
dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (output_enabled && bsize < top_bsize)
cm->counts.partition[ctx][partition]++;
@@ -5570,7 +5560,7 @@
// Smooth
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
0);
@@ -5607,7 +5597,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -5638,7 +5628,7 @@
// Smooth
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
0);
@@ -5673,7 +5663,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
@@ -5732,22 +5722,22 @@
if (bsize == BLOCK_8X8 && i != 0)
continue; // Skip <4x4 chroma smoothing
if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
if (mi_row + hbs < cm->mi_rows) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
}
} else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -5783,13 +5773,13 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
@@ -5825,13 +5815,13 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
@@ -5867,7 +5857,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
@@ -5875,7 +5865,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
@@ -5911,7 +5901,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -5919,7 +5909,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
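The repeated av1_build_masked_inter_predictor_complex calls in the hunks above blend two candidate predictions across a partition seam, per plane, in 2-D. A 1-D illustrative sketch of such a seam blend, assuming a simple linear 6-bit weight ramp rather than libaom's actual smoothing masks:

#include <stdint.h>

static void blend_seam_1d(uint8_t *dst, const uint8_t *pred0,
                          const uint8_t *pred1, int n, int seam, int ramp) {
  int i;
  for (i = 0; i < n; ++i) {
    /* weight for pred0 falls from 64 to 0 across [seam - ramp, seam + ramp] */
    int w = 32 + ((seam - i) * 32) / (ramp > 0 ? ramp : 1);
    if (w > 64) w = 64;
    if (w < 0) w = 0;
    dst[i] = (uint8_t)((pred0[i] * w + pred1[i] * (64 - w) + 32) >> 6);
  }
}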
@@ -5938,11 +5928,11 @@
#endif // CONFIG_EXT_PARTITION_TYPES
}
-static void rd_supertx_sb(VP10_COMP *cpi, ThreadData *td,
+static void rd_supertx_sb(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
TX_TYPE *best_tx, PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int plane, pnskip, skippable, skippable_uv, rate_uv, this_rate,
@@ -5962,7 +5952,7 @@
set_skip_context(xd, mi_row, mi_col);
set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize, 0, pc_tree);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
for (plane = 0; plane < MAX_MB_PLANE; plane++) {
dst_buf[plane] = xd->plane[plane].dst.buf;
dst_stride[plane] = xd->plane[plane].dst.stride;
@@ -5998,20 +5988,20 @@
tx_size = max_txsize_lookup[bsize];
tx_size = get_uv_tx_size_impl(tx_size, bsize, cm->subsampling_x,
cm->subsampling_y);
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
+ av1_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
coeff_ctx = combine_entropy_contexts(ctxa[0], ctxl[0]);
- vp10_subtract_plane(x, bsize, plane);
- vp10_tx_block_rd_b(cpi, x, tx_size, 0, 0, plane, 0,
- get_plane_block_size(bsize, pd), coeff_ctx, &this_rate,
- &this_dist, &pnsse, &pnskip);
+ av1_subtract_plane(x, bsize, plane);
+ av1_tx_block_rd_b(cpi, x, tx_size, 0, 0, plane, 0,
+ get_plane_block_size(bsize, pd), coeff_ctx, &this_rate,
+ &this_dist, &pnsse, &pnskip);
#else
tx_size = max_txsize_lookup[bsize];
tx_size = get_uv_tx_size_impl(tx_size, bsize, cm->subsampling_x,
cm->subsampling_y);
- vp10_subtract_plane(x, bsize, plane);
- vp10_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
- &pnsse, INT64_MAX, plane, bsize, tx_size, 0);
+ av1_subtract_plane(x, bsize, plane);
+ av1_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
+ &pnsse, INT64_MAX, plane, bsize, tx_size, 0);
#endif // CONFIG_VAR_TX
rate_uv += this_rate;
@@ -6022,7 +6012,7 @@
// luma
tx_size = max_txsize_lookup[bsize];
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
#if CONFIG_EXT_TX
ext_tx_set = get_ext_tx_set(tx_size, bsize, 1);
#endif // CONFIG_EXT_TX
@@ -6046,13 +6036,13 @@
pnsse = 0;
pnskip = 1;
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
+ av1_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
coeff_ctx = combine_entropy_contexts(ctxa[0], ctxl[0]);
- vp10_tx_block_rd_b(cpi, x, tx_size, 0, 0, 0, 0, bsize, coeff_ctx,
- &this_rate, &this_dist, &pnsse, &pnskip);
+ av1_tx_block_rd_b(cpi, x, tx_size, 0, 0, 0, 0, bsize, coeff_ctx, &this_rate,
+ &this_dist, &pnsse, &pnskip);
#else
- vp10_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
- &pnsse, INT64_MAX, 0, bsize, tx_size, 0);
+ av1_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
+ &pnsse, INT64_MAX, 0, bsize, tx_size, 0);
#endif // CONFIG_VAR_TX
#if CONFIG_EXT_TX
@@ -6073,16 +6063,16 @@
sse = sse_uv + pnsse;
skippable = skippable_uv && pnskip;
if (skippable) {
- *tmp_rate = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ *tmp_rate = av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
x->skip = 1;
} else {
if (RDCOST(x->rdmult, x->rddiv, *tmp_rate, *tmp_dist) <
RDCOST(x->rdmult, x->rddiv, 0, sse)) {
- *tmp_rate += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ *tmp_rate += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
x->skip = 0;
} else {
*tmp_dist = sse;
- *tmp_rate = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ *tmp_rate = av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
x->skip = 1;
}
}
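The tail of rd_supertx_sb above weighs coding the residue against signaling skip. A self-contained sketch of the non-skippable branch of that decision, assuming the usual libvpx-style RDCOST form (rate scaled by rdmult in Q8, distortion shifted up by rddiv bits); the bit-cost parameters stand in for av1_cost_bit(av1_get_skip_prob(cm, xd), 0/1):

#include <stdint.h>

#define RDCOST(RM, DM, R, D) \
  (((128 + ((int64_t)(R)) * (RM)) >> 8) + ((int64_t)(D) << (DM)))

static int decide_skip(int rdmult, int rddiv, int64_t sse, int *rate,
                       int64_t *dist, int skip_bit_cost, int noskip_bit_cost) {
  if (RDCOST(rdmult, rddiv, *rate, *dist) < RDCOST(rdmult, rddiv, 0, sse)) {
    *rate += noskip_bit_cost; /* code the residue, pay the no-skip flag */
    return 0;
  }
  *dist = sse;                /* drop the residue entirely */
  *rate = skip_bit_cost;      /* pay only the skip flag */
  return 1;
}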
diff --git a/av1/encoder/encodeframe.h b/av1/encoder/encodeframe.h
index 338cb86..a0ae454 100644
--- a/av1/encoder/encodeframe.h
+++ b/av1/encoder/encodeframe.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_ENCODEFRAME_H_
-#define VP10_ENCODER_ENCODEFRAME_H_
+#ifndef AV1_ENCODER_ENCODEFRAME_H_
+#define AV1_ENCODER_ENCODEFRAME_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -19,7 +19,7 @@
struct macroblock;
struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
struct ThreadData;
// Constants used in SOURCE_VAR_BASED_PARTITION
@@ -29,20 +29,20 @@
#define VAR_HIST_LARGE_CUT_OFF 75
#define VAR_HIST_SMALL_CUT_OFF 45
-void vp10_setup_src_planes(struct macroblock *x,
- const struct yv12_buffer_config *src, int mi_row,
- int mi_col);
+void av1_setup_src_planes(struct macroblock *x,
+ const struct yv12_buffer_config *src, int mi_row,
+ int mi_col);
-void vp10_encode_frame(struct VP10_COMP *cpi);
+void av1_encode_frame(struct AV1_COMP *cpi);
-void vp10_init_tile_data(struct VP10_COMP *cpi);
-void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
- int tile_row, int tile_col);
+void av1_init_tile_data(struct AV1_COMP *cpi);
+void av1_encode_tile(struct AV1_COMP *cpi, struct ThreadData *td, int tile_row,
+ int tile_col);
-void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
+void av1_set_variance_partition_thresholds(struct AV1_COMP *cpi, int q);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODEFRAME_H_
+#endif // AV1_ENCODER_ENCODEFRAME_H_
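The renamed declarations above are the tile-encoding entry points. A hypothetical driver showing how they compose, assuming an initialized encoder (cpi) and worker (td), with tile counts passed in rather than read from cpi->common as the real encoder does:

#include "av1/encoder/encodeframe.h"

static void encode_all_tiles(struct AV1_COMP *cpi, struct ThreadData *td,
                             int tile_rows, int tile_cols) {
  int tile_row, tile_col;
  av1_init_tile_data(cpi); /* set up per-tile data and token pointers */
  for (tile_row = 0; tile_row < tile_rows; ++tile_row)
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      av1_encode_tile(cpi, td, tile_row, tile_col);
}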
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index e72db2d..07a7748 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/idct.h"
@@ -27,33 +27,33 @@
#include "av1/encoder/rd.h"
#include "av1/encoder/tokenize.h"
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
+ aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
p->src.stride, pd->dst.buf, pd->dst.stride,
x->e_mbd.bd);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
pd->dst.buf, pd->dst.stride);
}
-typedef struct vp10_token_state {
+typedef struct av1_token_state {
int rate;
int64_t error;
int next;
int16_t token;
tran_low_t qc;
tran_low_t dqc;
-} vp10_token_state;
+} av1_token_state;
// These numbers are empirically obtained.
static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
@@ -66,13 +66,13 @@
rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
}
-int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
- int ctx) {
+int av1_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+ int ctx) {
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblock_plane *const p = &mb->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
const int ref = is_inter_block(&xd->mi[0]->mbmi);
- vp10_token_state tokens[MAX_TX_SQUARE + 1][2];
+ av1_token_state tokens[MAX_TX_SQUARE + 1][2];
unsigned best_index[MAX_TX_SQUARE + 1][2];
uint8_t token_cache[MAX_TX_SQUARE];
const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
@@ -110,10 +110,10 @@
int best, band = (eob < default_eob) ? band_translate[eob]
: band_translate[eob - 1];
int pt, i, final_eob;
-#if CONFIG_VP9_HIGHBITDEPTH
- const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+ const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
#else
- const int *cat6_high_cost = vp10_get_high_cost_table(8);
+ const int *cat6_high_cost = av1_get_high_cost_table(8);
#endif
unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
mb->token_costs[txsize_sqr_map[tx_size]][type][ref];
@@ -138,9 +138,9 @@
for (i = 0; i < eob; i++) {
const int rc = scan[i];
- tokens[i][0].rate = vp10_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
+ tokens[i][0].rate = av1_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
tokens[i][0].token = t0;
- token_cache[rc] = vp10_pt_energy_class[t0];
+ token_cache[rc] = av1_pt_energy_class[t0];
}
for (i = eob; i-- > 0;) {
@@ -180,11 +180,11 @@
}
dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
dx >>= xd->bd - 8;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
d2 = (int64_t)dx * dx;
tokens[i][0].rate += (best ? rate1 : rate0);
tokens[i][0].error = d2 + (best ? error1 : error0);
@@ -202,11 +202,11 @@
shortcut = 0;
} else {
#if CONFIG_NEW_QUANT
- shortcut = ((vp10_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
- dequant_val[band_translate[i]]) >
+ shortcut = ((av1_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
+ dequant_val[band_translate[i]]) >
(abs(coeff[rc]) << shift)) &&
- (vp10_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
- dequant_val[band_translate[i]]) <
+ (av1_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
+ dequant_val[band_translate[i]]) <
(abs(coeff[rc]) << shift)));
#else // CONFIG_NEW_QUANT
#if CONFIG_AOM_QM
@@ -251,19 +251,19 @@
t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
base_bits = 0;
} else {
- base_bits = vp10_get_token_cost(x, &t0, cat6_high_cost);
+ base_bits = av1_get_token_cost(x, &t0, cat6_high_cost);
t1 = t0;
}
if (next_shortcut) {
if (LIKELY(next < default_eob)) {
if (t0 != EOB_TOKEN) {
- token_cache[rc] = vp10_pt_energy_class[t0];
+ token_cache[rc] = av1_pt_energy_class[t0];
pt = get_coef_context(nb, token_cache, i + 1);
rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
}
if (t1 != EOB_TOKEN) {
- token_cache[rc] = vp10_pt_energy_class[t1];
+ token_cache[rc] = av1_pt_energy_class[t1];
pt = get_coef_context(nb, token_cache, i + 1);
rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
}
@@ -275,7 +275,7 @@
} else {
// The two states in next stage are identical.
if (next < default_eob && t0 != EOB_TOKEN) {
- token_cache[rc] = vp10_pt_energy_class[t0];
+ token_cache[rc] = av1_pt_energy_class[t0];
pt = get_coef_context(nb, token_cache, i + 1);
rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
}
@@ -283,16 +283,16 @@
}
#if CONFIG_NEW_QUANT
- dx = vp10_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
- dequant_val[band_translate[i]]) -
+ dx = av1_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
+ dequant_val[band_translate[i]]) -
(coeff[rc] << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
dx >>= xd->bd - 8;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else // CONFIG_NEW_QUANT
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
} else {
@@ -300,7 +300,7 @@
}
#else
dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_NEW_QUANT
d2 = (int64_t)dx * dx;
@@ -312,7 +312,7 @@
if (x) {
#if CONFIG_NEW_QUANT
- tokens[i][1].dqc = vp10_dequant_abscoeff_nuq(
+ tokens[i][1].dqc = av1_dequant_abscoeff_nuq(
abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
: tokens[i][1].dqc;
@@ -402,20 +402,18 @@
return final_eob;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef enum QUANT_FUNC {
QUANT_FUNC_LOWBD = 0,
QUANT_FUNC_HIGHBD = 1,
QUANT_FUNC_LAST = 2
} QUANT_FUNC;
-static VP10_QUANT_FACADE
- quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
- { vp10_quantize_fp_facade, vp10_highbd_quantize_fp_facade },
- { vp10_quantize_b_facade, vp10_highbd_quantize_b_facade },
- { vp10_quantize_dc_facade, vp10_highbd_quantize_dc_facade },
- { NULL, NULL }
- };
+static AV1_QUANT_FACADE quant_func_list[AV1_XFORM_QUANT_LAST][QUANT_FUNC_LAST] =
+ { { av1_quantize_fp_facade, av1_highbd_quantize_fp_facade },
+ { av1_quantize_b_facade, av1_highbd_quantize_b_facade },
+ { av1_quantize_dc_facade, av1_highbd_quantize_dc_facade },
+ { NULL, NULL } };
#else
typedef enum QUANT_FUNC {
@@ -423,22 +421,20 @@
QUANT_FUNC_LAST = 1
} QUANT_FUNC;
-static VP10_QUANT_FACADE
- quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
- { vp10_quantize_fp_facade },
- { vp10_quantize_b_facade },
- { vp10_quantize_dc_facade },
- { NULL }
- };
+static AV1_QUANT_FACADE quant_func_list[AV1_XFORM_QUANT_LAST][QUANT_FUNC_LAST] =
+ { { av1_quantize_fp_facade },
+ { av1_quantize_b_facade },
+ { av1_quantize_dc_facade },
+ { NULL } };
#endif
-static FWD_TXFM_OPT fwd_txfm_opt_list[VP10_XFORM_QUANT_LAST] = {
+static FWD_TXFM_OPT fwd_txfm_opt_list[AV1_XFORM_QUANT_LAST] = {
FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
};
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- VP10_XFORM_QUANT xform_quant_idx) {
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ AV1_XFORM_QUANT xform_quant_idx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -472,11 +468,11 @@
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
fwd_txfm_param.bd = xd->bd;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+ if (xform_quant_idx != AV1_XFORM_QUANT_SKIP_QUANT) {
if (LIKELY(!x->skip_block)) {
quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
@@ -486,15 +482,15 @@
#endif // CONFIG_AOM_QM
);
} else {
- vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+ av1_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
}
}
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+ if (xform_quant_idx != AV1_XFORM_QUANT_SKIP_QUANT) {
if (LIKELY(!x->skip_block)) {
quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
@@ -504,15 +500,15 @@
#endif // CONFIG_AOM_QM
);
} else {
- vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+ av1_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
}
}
}
#if CONFIG_NEW_QUANT
-void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx) {
+void av1_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ int ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -533,7 +529,7 @@
fwd_txfm_param.tx_type = tx_type;
fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+ fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_FP];
fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
@@ -541,7 +537,7 @@
// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
fwd_txfm_param.bd = xd->bd;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -561,7 +557,7 @@
}
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
if (tx_size == TX_32X32) {
@@ -579,9 +575,9 @@
}
}
-void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx) {
+void av1_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -602,7 +598,7 @@
fwd_txfm_param.tx_type = tx_type;
fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+ fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_FP];
fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
@@ -610,7 +606,7 @@
// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
fwd_txfm_param.bd = xd->bd;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -629,7 +625,7 @@
}
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
if (tx_size == TX_32X32) {
@@ -647,9 +643,9 @@
}
}
-void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx) {
+void av1_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -667,7 +663,7 @@
fwd_txfm_param.tx_type = tx_type;
fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+ fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_DC];
fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
@@ -675,7 +671,7 @@
// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
fwd_txfm_param.bd = xd->bd;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -692,7 +688,7 @@
}
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
if (tx_size == TX_32X32) {
@@ -707,10 +703,9 @@
}
}
-void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx) {
+void av1_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -728,7 +723,7 @@
fwd_txfm_param.tx_type = tx_type;
fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+ fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_DC];
fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
@@ -736,7 +731,7 @@
// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
fwd_txfm_param.bd = xd->bd;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -753,7 +748,7 @@
}
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
if (tx_size == TX_32X32) {
@@ -803,11 +798,11 @@
{
#endif
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, ctx);
+ av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size, ctx);
#else
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
}
#if CONFIG_VAR_TX
@@ -817,7 +812,7 @@
#endif
if (p->eobs[block]) {
- *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+ *a = *l = av1_optimize_b(x, plane, block, tx_size, ctx) > 0;
} else {
*a = *l = p->eobs[block] > 0;
}
@@ -841,13 +836,13 @@
inv_txfm_param.eob = p->eobs[block];
inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
inv_txfm_param.bd = xd->bd;
highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
}
@@ -922,41 +917,41 @@
#if CONFIG_NEW_QUANT
ctx = 0;
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, ctx);
+ av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size, ctx);
#else
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_B);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ AV1_XFORM_QUANT_B);
#endif // CONFIG_NEW_QUANT
if (p->eobs[block] > 0) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
- xd->bd);
+ av1_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
} else {
- vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
- xd->bd);
+ av1_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
}
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+ av1_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
} else {
- vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+ av1_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
}
}
}
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
- vp10_subtract_plane(x, bsize, 0);
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
- encode_block_pass1, x);
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ av1_subtract_plane(x, bsize, 0);
+ av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
+ encode_block_pass1, x);
}
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -983,21 +978,21 @@
#if CONFIG_EXT_TX && CONFIG_RECT_TX
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
#endif
- vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
+ av1_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
#else
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+ av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
#endif
- vp10_subtract_plane(x, bsize, plane);
+ av1_subtract_plane(x, bsize, plane);
arg.ta = ctx.ta[plane];
arg.tl = ctx.tl[plane];
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (tx_size >= TX_SIZES) {
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
- &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+ &arg);
} else {
#endif
for (idy = 0; idy < mi_height; idy += bh) {
@@ -1011,14 +1006,14 @@
}
#endif
#else
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
- &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+ &arg);
#endif
}
}
#if CONFIG_SUPERTX
-void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -1035,19 +1030,19 @@
#else
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
#endif
- vp10_subtract_plane(x, bsize, plane);
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+ av1_subtract_plane(x, bsize, plane);
+ av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
arg.ta = ctx.ta[plane];
arg.tl = ctx.tl[plane];
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
- &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+ &arg);
}
}
#endif // CONFIG_SUPERTX
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *arg) {
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
struct encode_b_args *const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1080,20 +1075,20 @@
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
- vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
- dst_stride, blk_col, blk_row, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+ av1_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+ dst_stride, blk_col, blk_row, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
+ aom_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
src, src_stride, dst, dst_stride, xd->bd);
} else {
- vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+ aom_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
src_stride, dst, dst_stride);
}
#else
- vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+ aom_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
src_stride, dst, dst_stride);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
a = &args->ta[blk_col];
l = &args->tl[blk_row];
@@ -1101,20 +1096,20 @@
if (args->enable_optimize_b) {
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, ctx);
+ av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size, ctx);
#else // CONFIG_NEW_QUANT
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
if (p->eobs[block]) {
- *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+ *a = *l = av1_optimize_b(x, plane, block, tx_size, ctx) > 0;
} else {
*a = *l = 0;
}
} else {
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_B);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ AV1_XFORM_QUANT_B);
*a = *l = p->eobs[block] > 0;
}
@@ -1124,7 +1119,7 @@
inv_txfm_param.tx_size = tx_size;
inv_txfm_param.eob = *eob;
inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
inv_txfm_param.bd = xd->bd;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
@@ -1133,14 +1128,14 @@
}
#else
inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
*(args->skip) = 0;
}
}
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
- int enable_optimize_b) {
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+ int enable_optimize_b) {
const MACROBLOCKD *const xd = &x->e_mbd;
ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
@@ -1151,8 +1146,8 @@
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size =
plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
+ av1_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
}
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
- vp10_encode_block_intra, &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane,
+ av1_encode_block_intra, &arg);
}
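encodemb.c above routes quantization through the quant_func_list facade table, indexed by transform/quant mode and bit depth, with a NULL row for the skip-quant mode. A stand-alone sketch of the pattern; every name below is a stand-in for illustration, not libaom's API:

#include <stdio.h>

typedef void (*QuantFn)(const short *coeff, int n);

static void q_fp(const short *c, int n) { (void)c; printf("fp quant, %d coeffs\n", n); }
static void q_b(const short *c, int n)  { (void)c; printf("b quant, %d coeffs\n", n); }
static void q_dc(const short *c, int n) { (void)c; printf("dc quant, %d coeffs\n", n); }

enum { MODE_FP, MODE_B, MODE_DC, MODE_SKIP_QUANT, MODE_LAST };
enum { DEPTH_LOWBD, DEPTH_HIGHBD, DEPTH_LAST };

static const QuantFn quant_table[MODE_LAST][DEPTH_LAST] = {
  { q_fp, q_fp }, /* a real table points at distinct highbd variants */
  { q_b, q_b },
  { q_dc, q_dc },
  { NULL, NULL }, /* skip-quant: no function; caller zeroes coefficients */
};

int main(void) {
  short coeff[16] = { 0 };
  const QuantFn fn = quant_table[MODE_B][DEPTH_LOWBD];
  if (fn) fn(coeff, 16);
  return 0;
}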
diff --git a/av1/encoder/encodemb.c.orig b/av1/encoder/encodemb.c.orig
deleted file mode 100644
index 4c94032..0000000
--- a/av1/encoder/encodemb.c.orig
+++ /dev/null
@@ -1,1158 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-
-#include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom_ports/mem.h"
-
-#include "av1/common/idct.h"
-#include "av1/common/reconinter.h"
-#include "av1/common/reconintra.h"
-#include "av1/common/scan.h"
-
-#include "av1/encoder/encodemb.h"
-#include "av1/encoder/hybrid_fwd_txfm.h"
-#include "av1/encoder/quantize.h"
-#include "av1/encoder/rd.h"
-#include "av1/encoder/tokenize.h"
-
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
- struct macroblock_plane *const p = &x->plane[plane];
- const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
- const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
- const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
-
-#if CONFIG_VP9_HIGHBITDEPTH
- if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
- p->src.stride, pd->dst.buf, pd->dst.stride,
- x->e_mbd.bd);
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
- pd->dst.buf, pd->dst.stride);
-}
-
-typedef struct vp10_token_state {
- int rate;
- int64_t error;
- int next;
- int16_t token;
- tran_low_t qc;
- tran_low_t dqc;
-} vp10_token_state;
-
-// These numbers are empirically obtained.
-static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
- { 10, 6 }, { 8, 5 },
-};
-
-#define UPDATE_RD_COST() \
- { \
- rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
- rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
- }
-
-int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
- int ctx) {
- MACROBLOCKD *const xd = &mb->e_mbd;
- struct macroblock_plane *const p = &mb->plane[plane];
- struct macroblockd_plane *const pd = &xd->plane[plane];
- const int ref = is_inter_block(&xd->mi[0]->mbmi);
- vp10_token_state tokens[MAX_TX_SQUARE + 1][2];
- unsigned best_index[MAX_TX_SQUARE + 1][2];
- uint8_t token_cache[MAX_TX_SQUARE];
- const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
- tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- const int eob = p->eobs[block];
- const PLANE_TYPE type = pd->plane_type;
- const int default_eob = get_tx2d_size(tx_size);
- const int16_t *const dequant_ptr = pd->dequant;
- const uint8_t *const band_translate = get_band_translate(tx_size);
- TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
- const scan_order *const so =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
- const int16_t *const scan = so->scan;
- const int16_t *const nb = so->neighbors;
-<<<<<<< HEAD
- const int shift = get_tx_scale(xd, tx_type, tx_size);
-=======
-#if CONFIG_AOM_QM
- int seg_id = xd->mi[0]->mbmi.segment_id;
- int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
- const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
-#endif
-#if CONFIG_AOM_QM
- int seg_id = xd->mi[0]->mbmi.segment_id;
- int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
- const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
-#endif
->>>>>>> 10d6f02... Port commits related to clpf and qm experiments
-#if CONFIG_NEW_QUANT
- int dq = get_dq_profile_from_ctx(ctx);
- const dequant_val_type_nuq *dequant_val = pd->dequant_val_nuq[dq];
-#else
- const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
-#endif // CONFIG_NEW_QUANT
- int next = eob, sz = 0;
- const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][type]) >> 1;
- const int64_t rddiv = mb->rddiv;
- int64_t rd_cost0, rd_cost1;
- int rate0, rate1;
- int64_t error0, error1;
- int16_t t0, t1;
- int best, band = (eob < default_eob) ? band_translate[eob]
- : band_translate[eob - 1];
- int pt, i, final_eob;
-#if CONFIG_VP9_HIGHBITDEPTH
- const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
-#else
- const int *cat6_high_cost = vp10_get_high_cost_table(8);
-#endif
- unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
- mb->token_costs[txsize_sqr_map[tx_size]][type][ref];
- const uint16_t *band_counts = &band_count_table[tx_size][band];
- uint16_t band_left = eob - band_cum_count_table[tx_size][band] + 1;
- int shortcut = 0;
- int next_shortcut = 0;
-
- token_costs += band;
-
- assert((!type && !plane) || (type && plane));
- assert(eob <= default_eob);
-
- /* Now set up a Viterbi trellis to evaluate alternative roundings. */
- /* Initialize the sentinel node of the trellis. */
- tokens[eob][0].rate = 0;
- tokens[eob][0].error = 0;
- tokens[eob][0].next = default_eob;
- tokens[eob][0].token = EOB_TOKEN;
- tokens[eob][0].qc = 0;
- tokens[eob][1] = tokens[eob][0];
-
- for (i = 0; i < eob; i++) {
- const int rc = scan[i];
- tokens[i][0].rate = vp10_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
- tokens[i][0].token = t0;
- token_cache[rc] = vp10_pt_energy_class[t0];
- }
-
- for (i = eob; i-- > 0;) {
- int base_bits, dx;
- int64_t d2;
- const int rc = scan[i];
-#if CONFIG_AOM_QM
- int iwt = iqmatrix[rc];
-#endif
- int x = qcoeff[rc];
- next_shortcut = shortcut;
-
- /* Only add a trellis state for non-zero coefficients. */
- if (UNLIKELY(x)) {
- error0 = tokens[next][0].error;
- error1 = tokens[next][1].error;
- /* Evaluate the first possibility for this state. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
-
- if (next_shortcut) {
- /* Consider both possible successor states. */
- if (next < default_eob) {
- pt = get_coef_context(nb, token_cache, i + 1);
- rate0 += (*token_costs)[0][pt][tokens[next][0].token];
- rate1 += (*token_costs)[0][pt][tokens[next][1].token];
- }
- UPDATE_RD_COST();
- /* And pick the best. */
- best = rd_cost1 < rd_cost0;
- } else {
- if (next < default_eob) {
- pt = get_coef_context(nb, token_cache, i + 1);
- rate0 += (*token_costs)[0][pt][tokens[next][0].token];
- }
- best = 0;
- }
-
- dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- dx >>= xd->bd - 8;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
- d2 = (int64_t)dx * dx;
- tokens[i][0].rate += (best ? rate1 : rate0);
- tokens[i][0].error = d2 + (best ? error1 : error0);
- tokens[i][0].next = next;
- tokens[i][0].qc = x;
- tokens[i][0].dqc = dqcoeff[rc];
- best_index[i][0] = best;
-
- /* Evaluate the second possibility for this state. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
-
- // The threshold of 3 is empirically obtained.
- if (UNLIKELY(abs(x) > 3)) {
- shortcut = 0;
- } else {
-#if CONFIG_NEW_QUANT
- shortcut = ((vp10_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
- dequant_val[band_translate[i]]) >
- (abs(coeff[rc]) << shift)) &&
- (vp10_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
- dequant_val[band_translate[i]]) <
- (abs(coeff[rc]) << shift)));
-#else // CONFIG_NEW_QUANT
-#if CONFIG_AOM_QM
- if ((abs(x) * dequant_ptr[rc != 0] * iwt >
- ((abs(coeff[rc]) << shift) << AOM_QM_BITS)) &&
- (abs(x) * dequant_ptr[rc != 0] * iwt <
- (((abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]) << AOM_QM_BITS)))
-#else
- if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
- (abs(x) * dequant_ptr[rc != 0] <
- (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
-#endif // CONFIG_AOM_QM
- shortcut = 1;
- else
- shortcut = 0;
-#endif // CONFIG_NEW_QUANT
- }
-
- if (shortcut) {
- sz = -(x < 0);
- x -= 2 * sz + 1;
- } else {
- tokens[i][1] = tokens[i][0];
- best_index[i][1] = best_index[i][0];
- next = i;
-
- if (UNLIKELY(!(--band_left))) {
- --band_counts;
- band_left = *band_counts;
- --token_costs;
- }
- continue;
- }
-
- /* Consider both possible successor states. */
- if (!x) {
- /* If we reduced this coefficient to zero, check to see if
- * we need to move the EOB back here.
- */
- t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
- t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
- base_bits = 0;
- } else {
- base_bits = vp10_get_token_cost(x, &t0, cat6_high_cost);
- t1 = t0;
- }
-
- if (next_shortcut) {
- if (LIKELY(next < default_eob)) {
- if (t0 != EOB_TOKEN) {
- token_cache[rc] = vp10_pt_energy_class[t0];
- pt = get_coef_context(nb, token_cache, i + 1);
- rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
- }
- if (t1 != EOB_TOKEN) {
- token_cache[rc] = vp10_pt_energy_class[t1];
- pt = get_coef_context(nb, token_cache, i + 1);
- rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
- }
- }
-
- UPDATE_RD_COST();
- /* And pick the best. */
- best = rd_cost1 < rd_cost0;
- } else {
- // The two states in next stage are identical.
- if (next < default_eob && t0 != EOB_TOKEN) {
- token_cache[rc] = vp10_pt_energy_class[t0];
- pt = get_coef_context(nb, token_cache, i + 1);
- rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
- }
- best = 0;
- }
-
-#if CONFIG_NEW_QUANT
- dx = vp10_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
- dequant_val[band_translate[i]]) -
- (coeff[rc] << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- dx >>= xd->bd - 8;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#else // CONFIG_NEW_QUANT
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
- } else {
- dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
- }
-#else
- dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#endif // CONFIG_NEW_QUANT
- d2 = (int64_t)dx * dx;
-
- tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
- tokens[i][1].error = d2 + (best ? error1 : error0);
- tokens[i][1].next = next;
- tokens[i][1].token = best ? t1 : t0;
- tokens[i][1].qc = x;
-
- if (x) {
-#if CONFIG_NEW_QUANT
- tokens[i][1].dqc = vp10_dequant_abscoeff_nuq(
- abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
- tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
- : tokens[i][1].dqc;
- if (sz) tokens[i][1].dqc = -tokens[i][1].dqc;
-#else
- tran_low_t offset = dq_step[rc != 0];
- // The 32x32 transform coefficient uses half quantization step size.
- // Account for the rounding difference in the dequantized coefficient
- // value when the quantization index is dropped from an even number
- // to an odd number.
- if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);
-
- if (sz == 0)
- tokens[i][1].dqc = dqcoeff[rc] - offset;
- else
- tokens[i][1].dqc = dqcoeff[rc] + offset;
-#endif // CONFIG_NEW_QUANT
- } else {
- tokens[i][1].dqc = 0;
- }
-
- best_index[i][1] = best;
- /* Finally, make this the new head of the trellis. */
- next = i;
- } else {
- /* There's no choice to make for a zero coefficient, so we don't
- * add a new trellis node, but we do need to update the costs.
- */
- t0 = tokens[next][0].token;
- t1 = tokens[next][1].token;
- pt = get_coef_context(nb, token_cache, i + 1);
- /* Update the cost of each path if we're past the EOB token. */
- if (t0 != EOB_TOKEN) {
- tokens[next][0].rate += (*token_costs)[1][pt][t0];
- tokens[next][0].token = ZERO_TOKEN;
- }
- if (t1 != EOB_TOKEN) {
- tokens[next][1].rate += (*token_costs)[1][pt][t1];
- tokens[next][1].token = ZERO_TOKEN;
- }
- best_index[i][0] = best_index[i][1] = 0;
- shortcut = (tokens[next][0].rate != tokens[next][1].rate);
- /* Don't update next, because we didn't add a new node. */
- }
-
- if (UNLIKELY(!(--band_left))) {
- --band_counts;
- band_left = *band_counts;
- --token_costs;
- }
- }
-
- /* Now pick the best path through the whole trellis. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
- error0 = tokens[next][0].error;
- error1 = tokens[next][1].error;
- t0 = tokens[next][0].token;
- t1 = tokens[next][1].token;
- rate0 += (*token_costs)[0][ctx][t0];
- rate1 += (*token_costs)[0][ctx][t1];
- UPDATE_RD_COST();
- best = rd_cost1 < rd_cost0;
-
- final_eob = -1;
-
- for (i = next; i < eob; i = next) {
- const int x = tokens[i][best].qc;
- const int rc = scan[i];
-#if CONFIG_AOM_QM
- const int iwt = iqmatrix[rc];
- const int dequant =
- (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >> AOM_QM_BITS;
-#endif
-
- if (x) final_eob = i;
- qcoeff[rc] = x;
- dqcoeff[rc] = tokens[i][best].dqc;
-
- next = tokens[i][best].next;
- best = best_index[i][best];
- }
- final_eob++;
-
- mb->plane[plane].eobs[block] = final_eob;
- assert(final_eob <= default_eob);
- return final_eob;
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef enum QUANT_FUNC {
- QUANT_FUNC_LOWBD = 0,
- QUANT_FUNC_HIGHBD = 1,
- QUANT_FUNC_LAST = 2
-} QUANT_FUNC;
-
-static VP10_QUANT_FACADE
- quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
- { vp10_quantize_fp_facade, vp10_highbd_quantize_fp_facade },
- { vp10_quantize_b_facade, vp10_highbd_quantize_b_facade },
- { vp10_quantize_dc_facade, vp10_highbd_quantize_dc_facade },
- { NULL, NULL }
- };
-
-#else
-typedef enum QUANT_FUNC {
- QUANT_FUNC_LOWBD = 0,
- QUANT_FUNC_LAST = 1
-} QUANT_FUNC;
-
-static VP10_QUANT_FACADE
- quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
- { vp10_quantize_fp_facade },
- { vp10_quantize_b_facade },
- { vp10_quantize_dc_facade },
- { NULL }
- };
-#endif
-
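-// Forward-transform optimization mode per VP10_XFORM_QUANT entry, in enum
-// order (FP, B, DC, SKIP_QUANT); only the DC-only pass can use the cheaper
-// FWD_TXFM_OPT_DC mode.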
-static FWD_TXFM_OPT fwd_txfm_opt_list[VP10_XFORM_QUANT_LAST] = {
- FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
-};
-
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- VP10_XFORM_QUANT xform_quant_idx) {
- MACROBLOCKD *const xd = &x->e_mbd;
- const struct macroblock_plane *const p = &x->plane[plane];
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- const scan_order *const scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
- tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
- tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- uint16_t *const eob = &p->eobs[block];
- const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-#if CONFIG_AOM_QM
- int seg_id = xd->mi[0]->mbmi.segment_id;
- int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
- const qm_val_t *qmatrix = pd->seg_qmatrix[seg_id][is_intra][tx_size];
- const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
-#endif
- const int16_t *src_diff;
- const int tx2d_size = get_tx2d_size(tx_size);
-
- FWD_TXFM_PARAM fwd_txfm_param;
- QUANT_PARAM qparam;
-
- fwd_txfm_param.tx_type = tx_type;
- fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[xform_quant_idx];
- fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
- fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
- src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
- qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
-#if CONFIG_VP9_HIGHBITDEPTH
- fwd_txfm_param.bd = xd->bd;
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
- if (LIKELY(!x->skip_block)) {
- quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
-          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
-#if CONFIG_AOM_QM
-          , qmatrix, iqmatrix
-#endif  // CONFIG_AOM_QM
-          );
-    } else {
-      vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
- }
- }
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
- if (LIKELY(!x->skip_block)) {
- quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
-          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
-#if CONFIG_AOM_QM
-          , qmatrix, iqmatrix
-#endif  // CONFIG_AOM_QM
-          );
-    } else {
-      vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
- }
- }
-}
-
-#if CONFIG_NEW_QUANT
-void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx) {
- MACROBLOCKD *const xd = &x->e_mbd;
- const struct macroblock_plane *const p = &x->plane[plane];
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- const scan_order *const scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
- tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
- tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- int dq = get_dq_profile_from_ctx(ctx);
- uint16_t *const eob = &p->eobs[block];
- const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- const int16_t *src_diff;
- const uint8_t *band = get_band_translate(tx_size);
-
- FWD_TXFM_PARAM fwd_txfm_param;
-
- fwd_txfm_param.tx_type = tx_type;
- fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
- fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
- fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
- src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this experiment to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
- fwd_txfm_param.bd = xd->bd;
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- highbd_quantize_32x32_nuq(
- coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
- p->quant_shift, pd->dequant,
- (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
- dqcoeff, eob, scan_order->scan, band);
- } else {
- highbd_quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
- p->quant, p->quant_shift, pd->dequant,
- (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
- qcoeff, dqcoeff, eob, scan_order->scan, band);
- }
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- quantize_32x32_nuq(coeff, 1024, x->skip_block, p->quant, p->quant_shift,
- pd->dequant,
- (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
- qcoeff, dqcoeff, eob, scan_order->scan, band);
- } else {
- quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
- p->quant_shift, pd->dequant,
- (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
- dqcoeff, eob, scan_order->scan, band);
- }
-}
-
-void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx) {
- MACROBLOCKD *const xd = &x->e_mbd;
- const struct macroblock_plane *const p = &x->plane[plane];
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- int dq = get_dq_profile_from_ctx(ctx);
- PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- const scan_order *const scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
- tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
- tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- uint16_t *const eob = &p->eobs[block];
- const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- const int16_t *src_diff;
- const uint8_t *band = get_band_translate(tx_size);
-
- FWD_TXFM_PARAM fwd_txfm_param;
-
- fwd_txfm_param.tx_type = tx_type;
- fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
- fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
- fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
- src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this experiment to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
- fwd_txfm_param.bd = xd->bd;
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- highbd_quantize_32x32_fp_nuq(
- coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
- pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
- dqcoeff, eob, scan_order->scan, band);
- } else {
- highbd_quantize_fp_nuq(
- coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
- pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
- dqcoeff, eob, scan_order->scan, band);
- }
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- quantize_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
- p->quant_fp, pd->dequant,
- (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
- qcoeff, dqcoeff, eob, scan_order->scan, band);
- } else {
- quantize_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
- pd->dequant,
- (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
- (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
- qcoeff, dqcoeff, eob, scan_order->scan, band);
- }
-}
-
-void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx) {
- MACROBLOCKD *const xd = &x->e_mbd;
- const struct macroblock_plane *const p = &x->plane[plane];
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
- tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- uint16_t *const eob = &p->eobs[block];
- const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- const int16_t *src_diff;
- int dq = get_dq_profile_from_ctx(ctx);
-
- FWD_TXFM_PARAM fwd_txfm_param;
-
- fwd_txfm_param.tx_type = tx_type;
- fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
- fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
- fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
- src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this experiment to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
- fwd_txfm_param.bd = xd->bd;
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- highbd_quantize_dc_32x32_nuq(
- coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
- p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
- pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
- } else {
- highbd_quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
- p->quant[0], p->quant_shift[0], pd->dequant[0],
- p->cuml_bins_nuq[dq][0],
- pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
- }
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- quantize_dc_32x32_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
- p->quant[0], p->quant_shift[0], pd->dequant[0],
- p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
- qcoeff, dqcoeff, eob);
- } else {
- quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
- p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
- pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
- }
-}
-
-void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx) {
- MACROBLOCKD *const xd = &x->e_mbd;
- const struct macroblock_plane *const p = &x->plane[plane];
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
- tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- uint16_t *const eob = &p->eobs[block];
- const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- const int16_t *src_diff;
- int dq = get_dq_profile_from_ctx(ctx);
-
- FWD_TXFM_PARAM fwd_txfm_param;
-
- fwd_txfm_param.tx_type = tx_type;
- fwd_txfm_param.tx_size = tx_size;
- fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
- fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
- fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
- src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this experiment to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
- fwd_txfm_param.bd = xd->bd;
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- highbd_quantize_dc_32x32_fp_nuq(
- coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
- pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
- qcoeff, dqcoeff, eob);
- } else {
- highbd_quantize_dc_fp_nuq(
- coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
- pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
- qcoeff, dqcoeff, eob);
- }
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
- if (tx_size == TX_32X32) {
- quantize_dc_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
- p->quant_fp[0], pd->dequant[0],
- p->cuml_bins_nuq[dq][0],
- pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
- } else {
- quantize_dc_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
- p->quant_fp[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
- pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
- }
-}
-#endif // CONFIG_NEW_QUANT
-
-static void encode_block(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
- struct encode_b_args *const args = arg;
- MACROBLOCK *const x = args->x;
- MACROBLOCKD *const xd = &x->e_mbd;
- int ctx;
- struct macroblock_plane *const p = &x->plane[plane];
- struct macroblockd_plane *const pd = &xd->plane[plane];
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- uint8_t *dst;
- ENTROPY_CONTEXT *a, *l;
- INV_TXFM_PARAM inv_txfm_param;
-#if CONFIG_VAR_TX
- int i;
- const int bwl = b_width_log2_lookup[plane_bsize];
-#endif
- dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
- a = &args->ta[blk_col];
- l = &args->tl[blk_row];
-#if CONFIG_VAR_TX
- ctx = get_entropy_context(tx_size, a, l);
-#else
- ctx = combine_entropy_contexts(*a, *l);
-#endif
-
-#if CONFIG_VAR_TX
-  // Assert that the block-skip flag was initialised (234 is its magic
-  // "uninitialised" marker).
- assert(x->blk_skip[plane][(blk_row << bwl) + blk_col] != 234);
-
- if (x->blk_skip[plane][(blk_row << bwl) + blk_col] == 0) {
-#else
- {
-#endif
-#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, ctx);
-#else
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_FP);
-#endif // CONFIG_NEW_QUANT
- }
-#if CONFIG_VAR_TX
- else {
- p->eobs[block] = 0;
- }
-#endif
-
- if (p->eobs[block]) {
- *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
- } else {
- *a = *l = p->eobs[block] > 0;
- }
-
-#if CONFIG_VAR_TX
- for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i) {
- a[i] = a[0];
- }
- for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i) {
- l[i] = l[0];
- }
-#endif
-
- if (p->eobs[block]) *(args->skip) = 0;
-
- if (p->eobs[block] == 0) return;
-
- // inverse transform parameters
- inv_txfm_param.tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
- inv_txfm_param.tx_size = tx_size;
- inv_txfm_param.eob = p->eobs[block];
- inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- inv_txfm_param.bd = xd->bd;
- highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
- inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
-}
-
-#if CONFIG_VAR_TX
-static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *arg) {
- struct encode_b_args *const args = arg;
- MACROBLOCK *const x = args->x;
- MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- const int tx_row = blk_row >> (1 - pd->subsampling_y);
- const int tx_col = blk_col >> (1 - pd->subsampling_x);
- const TX_SIZE plane_tx_size =
- plane ? get_uv_tx_size_impl(mbmi->inter_tx_size[tx_row][tx_col], bsize, 0,
- 0)
- : mbmi->inter_tx_size[tx_row][tx_col];
-
- int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
- int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
-
- if (xd->mb_to_bottom_edge < 0)
- max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
- if (xd->mb_to_right_edge < 0)
- max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
-
- if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
-
- if (tx_size == plane_tx_size) {
- encode_block(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
- } else {
- int bsl = b_width_log2_lookup[bsize];
- int i;
-
- assert(bsl > 0);
- --bsl;
-
-#if CONFIG_EXT_TX
- assert(tx_size < TX_SIZES);
-#endif // CONFIG_EXT_TX
-
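-    // Recurse into the four quadrants of the current block: after --bsl,
-    // (i >> 1) selects the row half and (i & 0x01) the column half of each
-    // sub-block at the next smaller transform size.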
- for (i = 0; i < 4; ++i) {
- const int offsetr = blk_row + ((i >> 1) << bsl);
- const int offsetc = blk_col + ((i & 0x01) << bsl);
- int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
-
- if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
-
- encode_block_inter(plane, block + i * step, offsetr, offsetc, plane_bsize,
- tx_size - 1, arg);
- }
- }
-}
-#endif
-
-static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *arg) {
- MACROBLOCK *const x = (MACROBLOCK *)arg;
- MACROBLOCKD *const xd = &x->e_mbd;
- struct macroblock_plane *const p = &x->plane[plane];
- struct macroblockd_plane *const pd = &xd->plane[plane];
- tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- uint8_t *dst;
-#if CONFIG_NEW_QUANT
- int ctx;
-#endif // CONFIG_NEW_QUANT
- dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
-
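-  // Pass-1 coding assumes TX_4X4 throughout, which is why only the fixed
-  // 4x4 inverse transforms are applied below.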
-#if CONFIG_NEW_QUANT
- ctx = 0;
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, ctx);
-#else
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_B);
-#endif // CONFIG_NEW_QUANT
-
- if (p->eobs[block] > 0) {
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
- xd->bd);
- } else {
- vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
- xd->bd);
- }
- return;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
- if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
- } else {
- vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
- }
- }
-}
-
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
- vp10_subtract_plane(x, bsize, 0);
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
- encode_block_pass1, x);
-}
-
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
- MACROBLOCKD *const xd = &x->e_mbd;
- struct optimize_ctx ctx;
- MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
- int plane;
-
- mbmi->skip = 1;
-
- if (x->skip) return;
-
- for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-#if CONFIG_VAR_TX
- // TODO(jingning): Clean this up.
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
- const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
- const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
- const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
- const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
- const int bh = num_4x4_blocks_wide_lookup[txb_size];
- int idx, idy;
- int block = 0;
- int step = num_4x4_blocks_txsize_lookup[max_tx_size];
- vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
-#else
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
-#endif
- vp10_subtract_plane(x, bsize, plane);
- arg.ta = ctx.ta[plane];
- arg.tl = ctx.tl[plane];
-
-#if CONFIG_VAR_TX
- for (idy = 0; idy < mi_height; idy += bh) {
- for (idx = 0; idx < mi_width; idx += bh) {
- encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
- &arg);
- block += step;
- }
- }
-#else
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
- &arg);
-#endif
- }
-}
-
-#if CONFIG_SUPERTX
-void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
- MACROBLOCKD *const xd = &x->e_mbd;
- struct optimize_ctx ctx;
- MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
- int plane;
-
- mbmi->skip = 1;
- if (x->skip) return;
-
- for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- const struct macroblockd_plane *const pd = &xd->plane[plane];
-#if CONFIG_VAR_TX
- const TX_SIZE tx_size = TX_4X4;
-#else
- const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
-#endif
- vp10_subtract_plane(x, bsize, plane);
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
- arg.ta = ctx.ta[plane];
- arg.tl = ctx.tl[plane];
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
- &arg);
- }
-}
-#endif // CONFIG_SUPERTX
-
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *arg) {
- struct encode_b_args *const args = arg;
- MACROBLOCK *const x = args->x;
- MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct macroblock_plane *const p = &x->plane[plane];
- struct macroblockd_plane *const pd = &xd->plane[plane];
- tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- const TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- PREDICTION_MODE mode;
- const int bwl = b_width_log2_lookup[plane_bsize];
- const int bhl = b_height_log2_lookup[plane_bsize];
- const int diff_stride = 4 * (1 << bwl);
- uint8_t *src, *dst;
- int16_t *src_diff;
- uint16_t *eob = &p->eobs[block];
- const int src_stride = p->src.stride;
- const int dst_stride = pd->dst.stride;
- const int tx1d_width = num_4x4_blocks_wide_txsize_lookup[tx_size] << 2;
- const int tx1d_height = num_4x4_blocks_high_txsize_lookup[tx_size] << 2;
- ENTROPY_CONTEXT *a = NULL, *l = NULL;
- int ctx;
-
- INV_TXFM_PARAM inv_txfm_param;
-
- assert(tx1d_width == tx1d_height);
-
- dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
- src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
- src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
- mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
- vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
- dst_stride, blk_col, blk_row, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
- } else {
- vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
- src_stride, dst, dst_stride);
- }
-#else
- vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
- src_stride, dst, dst_stride);
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- a = &args->ta[blk_col];
- l = &args->tl[blk_row];
- ctx = combine_entropy_contexts(*a, *l);
-
- if (args->enable_optimize_b) {
-#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, ctx);
-#else // CONFIG_NEW_QUANT
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_FP);
-#endif // CONFIG_NEW_QUANT
- if (p->eobs[block]) {
- *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
- } else {
- *a = *l = 0;
- }
- } else {
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_B);
- *a = *l = p->eobs[block] > 0;
- }
-
- if (*eob) {
- // inverse transform
- inv_txfm_param.tx_type = tx_type;
- inv_txfm_param.tx_size = tx_size;
- inv_txfm_param.eob = *eob;
- inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
- inv_txfm_param.bd = xd->bd;
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
- } else {
- inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
- }
-#else
- inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- *(args->skip) = 0;
- }
-}
-
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
- int enable_optimize_b) {
- const MACROBLOCKD *const xd = &x->e_mbd;
- ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
- ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
-
- struct encode_b_args arg = { x, NULL, &xd->mi[0]->mbmi.skip,
- ta, tl, enable_optimize_b };
- if (enable_optimize_b) {
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- const TX_SIZE tx_size =
- plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
- }
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
- vp10_encode_block_intra, &arg);
-}
diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h
index 4b88831..c9f9f6d 100644
--- a/av1/encoder/encodemb.h
+++ b/av1/encoder/encodemb.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_ENCODEMB_H_
-#define VP10_ENCODER_ENCODEMB_H_
+#ifndef AV1_ENCODER_ENCODEMB_H_
+#define AV1_ENCODER_ENCODEMB_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/encoder/block.h"
#ifdef __cplusplus
@@ -32,52 +32,50 @@
int8_t enable_optimize_b;
};
-typedef enum VP10_XFORM_QUANT {
- VP10_XFORM_QUANT_FP = 0,
- VP10_XFORM_QUANT_B = 1,
- VP10_XFORM_QUANT_DC = 2,
- VP10_XFORM_QUANT_SKIP_QUANT = 3,
- VP10_XFORM_QUANT_LAST = 4
-} VP10_XFORM_QUANT;
+typedef enum AV1_XFORM_QUANT {
+ AV1_XFORM_QUANT_FP = 0,
+ AV1_XFORM_QUANT_B = 1,
+ AV1_XFORM_QUANT_DC = 2,
+ AV1_XFORM_QUANT_SKIP_QUANT = 3,
+ AV1_XFORM_QUANT_LAST = 4
+} AV1_XFORM_QUANT;
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
#if CONFIG_SUPERTX
-void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
#endif // CONFIG_SUPERTX
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- VP10_XFORM_QUANT xform_quant_idx);
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ AV1_XFORM_QUANT xform_quant_idx);
#if CONFIG_NEW_QUANT
-void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx);
-void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx);
-void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx);
-void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx);
+void av1_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ int ctx);
+void av1_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int ctx);
+void av1_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int ctx);
+void av1_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int ctx);
#endif
-int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
- int ctx);
+int av1_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+ int ctx);
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *arg);
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg);
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
- int enable_optimize_b);
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+ int enable_optimize_b);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODEMB_H_
+#endif // AV1_ENCODER_ENCODEMB_H_
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
index 78da2b7..a5e06a4 100644
--- a/av1/encoder/encodemv.c
+++ b/av1/encoder/encodemv.c
@@ -17,26 +17,26 @@
#include "av1/encoder/encodemv.h"
#include "av1/encoder/subexp.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
-static struct vp10_token mv_joint_encodings[MV_JOINTS];
-static struct vp10_token mv_class_encodings[MV_CLASSES];
-static struct vp10_token mv_fp_encodings[MV_FP_SIZE];
-static struct vp10_token mv_class0_encodings[CLASS0_SIZE];
+static struct av1_token mv_joint_encodings[MV_JOINTS];
+static struct av1_token mv_class_encodings[MV_CLASSES];
+static struct av1_token mv_fp_encodings[MV_FP_SIZE];
+static struct av1_token mv_class0_encodings[CLASS0_SIZE];
-void vp10_entropy_mv_init(void) {
- vp10_tokens_from_tree(mv_joint_encodings, vp10_mv_joint_tree);
- vp10_tokens_from_tree(mv_class_encodings, vp10_mv_class_tree);
- vp10_tokens_from_tree(mv_class0_encodings, vp10_mv_class0_tree);
- vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
+void av1_entropy_mv_init(void) {
+ av1_tokens_from_tree(mv_joint_encodings, av1_mv_joint_tree);
+ av1_tokens_from_tree(mv_class_encodings, av1_mv_class_tree);
+ av1_tokens_from_tree(mv_class0_encodings, av1_mv_class0_tree);
+ av1_tokens_from_tree(mv_fp_encodings, av1_mv_fp_tree);
}
-static void encode_mv_component(vp10_writer *w, int comp,
+static void encode_mv_component(aom_writer *w, int comp,
const nmv_component *mvcomp, int usehp) {
int offset;
const int sign = comp < 0;
const int mag = sign ? -comp : comp;
- const int mv_class = vp10_get_mv_class(mag - 1, &offset);
+ const int mv_class = av1_get_mv_class(mag - 1, &offset);
const int d = offset >> 3; // int mv data
const int fr = (offset >> 1) & 3; // fractional mv data
const int hp = offset & 1; // high precision mv data
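   // For example (illustrative): offset 22 = 0b10110 splits into d = 2,
   // fr = 3 and hp = 0.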
@@ -44,30 +44,30 @@
assert(comp != 0);
// Sign
- vp10_write(w, sign, mvcomp->sign);
+ aom_write(w, sign, mvcomp->sign);
// Class
- vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
- &mv_class_encodings[mv_class]);
+ av1_write_token(w, av1_mv_class_tree, mvcomp->classes,
+ &mv_class_encodings[mv_class]);
// Integer bits
if (mv_class == MV_CLASS_0) {
- vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
- &mv_class0_encodings[d]);
+ av1_write_token(w, av1_mv_class0_tree, mvcomp->class0,
+ &mv_class0_encodings[d]);
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
- for (i = 0; i < n; ++i) vp10_write(w, (d >> i) & 1, mvcomp->bits[i]);
+ for (i = 0; i < n; ++i) aom_write(w, (d >> i) & 1, mvcomp->bits[i]);
}
// Fractional bits
- vp10_write_token(w, vp10_mv_fp_tree,
- mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
- &mv_fp_encodings[fr]);
+ av1_write_token(w, av1_mv_fp_tree,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+ &mv_fp_encodings[fr]);
// High precision bit
if (usehp)
- vp10_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+ aom_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
}
static void build_nmv_component_cost_table(int *mvcost,
@@ -79,30 +79,30 @@
int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE];
int class0_hp_cost[2], hp_cost[2];
- sign_cost[0] = vp10_cost_zero(mvcomp->sign);
- sign_cost[1] = vp10_cost_one(mvcomp->sign);
- vp10_cost_tokens(class_cost, mvcomp->classes, vp10_mv_class_tree);
- vp10_cost_tokens(class0_cost, mvcomp->class0, vp10_mv_class0_tree);
+ sign_cost[0] = av1_cost_zero(mvcomp->sign);
+ sign_cost[1] = av1_cost_one(mvcomp->sign);
+ av1_cost_tokens(class_cost, mvcomp->classes, av1_mv_class_tree);
+ av1_cost_tokens(class0_cost, mvcomp->class0, av1_mv_class0_tree);
for (i = 0; i < MV_OFFSET_BITS; ++i) {
- bits_cost[i][0] = vp10_cost_zero(mvcomp->bits[i]);
- bits_cost[i][1] = vp10_cost_one(mvcomp->bits[i]);
+ bits_cost[i][0] = av1_cost_zero(mvcomp->bits[i]);
+ bits_cost[i][1] = av1_cost_one(mvcomp->bits[i]);
}
for (i = 0; i < CLASS0_SIZE; ++i)
- vp10_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp10_mv_fp_tree);
- vp10_cost_tokens(fp_cost, mvcomp->fp, vp10_mv_fp_tree);
+ av1_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], av1_mv_fp_tree);
+ av1_cost_tokens(fp_cost, mvcomp->fp, av1_mv_fp_tree);
if (usehp) {
- class0_hp_cost[0] = vp10_cost_zero(mvcomp->class0_hp);
- class0_hp_cost[1] = vp10_cost_one(mvcomp->class0_hp);
- hp_cost[0] = vp10_cost_zero(mvcomp->hp);
- hp_cost[1] = vp10_cost_one(mvcomp->hp);
+ class0_hp_cost[0] = av1_cost_zero(mvcomp->class0_hp);
+ class0_hp_cost[1] = av1_cost_one(mvcomp->class0_hp);
+ hp_cost[0] = av1_cost_zero(mvcomp->hp);
+ hp_cost[1] = av1_cost_one(mvcomp->hp);
}
mvcost[0] = 0;
for (v = 1; v <= MV_MAX; ++v) {
int z, c, o, d, e, f, cost = 0;
z = v - 1;
- c = vp10_get_mv_class(z, &o);
+ c = av1_get_mv_class(z, &o);
cost += class_cost[c];
d = (o >> 3); /* int mv data */
f = (o >> 1) & 3; /* fractional pel mv data */
@@ -131,48 +131,48 @@
}
}
-static void update_mv(vp10_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
- vpx_prob upd_p) {
+static void update_mv(aom_writer *w, const unsigned int ct[2], aom_prob *cur_p,
+ aom_prob upd_p) {
(void)upd_p;
- vp10_cond_prob_diff_update(w, cur_p, ct);
+ av1_cond_prob_diff_update(w, cur_p, ct);
}
-static void write_mv_update(const vpx_tree_index *tree,
- vpx_prob probs[/*n - 1*/],
+static void write_mv_update(const aom_tree_index *tree,
+ aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/], int n,
- vp10_writer *w) {
+ aom_writer *w) {
int i;
unsigned int branch_ct[32][2];
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i)
update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB);
}
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
- nmv_context_counts *const nmv_counts) {
+void aom_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
+ nmv_context_counts *const nmv_counts) {
int i, j;
#if CONFIG_REF_MV
int nmv_ctx = 0;
for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
nmv_context_counts *const counts = &nmv_counts[nmv_ctx];
- write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+ write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
w);
- vp10_cond_prob_diff_update(w, &mvc->zero_rmv, counts->zero_rmv);
+ av1_cond_prob_diff_update(w, &mvc->zero_rmv, counts->zero_rmv);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &mvc->comps[i];
nmv_component_counts *comp_counts = &counts->comps[i];
update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
- write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+ write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
MV_CLASSES, w);
- write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+ write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
CLASS0_SIZE, w);
for (j = 0; j < MV_OFFSET_BITS; ++j)
update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
@@ -180,10 +180,10 @@
for (i = 0; i < 2; ++i) {
for (j = 0; j < CLASS0_SIZE; ++j)
- write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+ write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
- write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+ write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
MV_FP_SIZE, w);
}
@@ -199,17 +199,16 @@
nmv_context *const mvc = &cm->fc->nmvc;
nmv_context_counts *const counts = nmv_counts;
- write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
- w);
+ write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &mvc->comps[i];
nmv_component_counts *comp_counts = &counts->comps[i];
update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
- write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+ write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
MV_CLASSES, w);
- write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+ write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
CLASS0_SIZE, w);
for (j = 0; j < MV_OFFSET_BITS; ++j)
update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
@@ -217,10 +216,10 @@
for (i = 0; i < 2; ++i) {
for (j = 0; j < CLASS0_SIZE; ++j)
- write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+ write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
- write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+ write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
MV_FP_SIZE, w);
}
@@ -234,18 +233,18 @@
#endif
}
-void vp10_encode_mv(VP10_COMP *cpi, vp10_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
#if CONFIG_REF_MV
- int is_compound,
+ int is_compound,
#endif
- const nmv_context *mvctx, int usehp) {
+ const nmv_context *mvctx, int usehp) {
const MV diff = { mv->row - ref->row, mv->col - ref->col };
- const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
- usehp = usehp && vp10_use_mv_hp(ref);
+ const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);
+ usehp = usehp && av1_use_mv_hp(ref);
#if CONFIG_REF_MV && !CONFIG_EXT_INTER
if (is_compound) {
- vp10_write(w, (j == MV_JOINT_ZERO), mvctx->zero_rmv);
+ aom_write(w, (j == MV_JOINT_ZERO), mvctx->zero_rmv);
if (j == MV_JOINT_ZERO) return;
} else {
if (j == MV_JOINT_ZERO) assert(0);
@@ -256,8 +255,7 @@
(void)is_compound;
#endif
- vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
- &mv_joint_encodings[j]);
+ av1_write_token(w, av1_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
if (mv_joint_vertical(j))
encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
@@ -267,14 +265,14 @@
// If auto_mv_step_size is enabled then keep track of the largest
// motion vector component used.
if (cpi->sf.mv.auto_mv_step_size) {
- unsigned int maxv = VPXMAX(abs(mv->row), abs(mv->col)) >> 3;
- cpi->max_mv_magnitude = VPXMAX(maxv, cpi->max_mv_magnitude);
+ unsigned int maxv = AOMMAX(abs(mv->row), abs(mv->col)) >> 3;
+ cpi->max_mv_magnitude = AOMMAX(maxv, cpi->max_mv_magnitude);
}
}
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context *ctx, int usehp) {
- vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+ const nmv_context *ctx, int usehp) {
+ av1_cost_tokens(mvjoint, ctx->joints, av1_mv_joint_tree);
build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
}
@@ -299,33 +297,33 @@
const MV diff = { mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col };
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
(void)pred_mvs;
#endif
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
}
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv;
const MV diff = { mvs[1].as_mv.row - ref->row,
mvs[1].as_mv.col - ref->col };
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
const MV diff = { mvs[0].as_mv.row - ref->row,
mvs[0].as_mv.col - ref->col };
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
}
}
@@ -348,32 +346,32 @@
const MV diff = { mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col };
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
}
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
const MV *ref = &mi->bmi[block].ref_mv[1].as_mv;
const MV diff = { mvs[1].as_mv.row - ref->row,
mvs[1].as_mv.col - ref->col };
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
const MV *ref = &mi->bmi[block].ref_mv[0].as_mv;
const MV diff = { mvs[0].as_mv.row - ref->row,
mvs[0].as_mv.col - ref->col };
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
}
}
#else
@@ -390,8 +388,8 @@
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
const MV *ref = &pred_mvs[i].as_mv;
#else
@@ -399,12 +397,12 @@
#endif
const MV diff = { mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col };
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
}
}
#endif // CONFIG_EXT_INTER
-void vp10_update_mv_count(ThreadData *td) {
+void av1_update_mv_count(ThreadData *td) {
const MACROBLOCKD *xd = &td->mb.e_mbd;
const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
diff --git a/av1/encoder/encodemv.h b/av1/encoder/encodemv.h
index 6cb57c2..edd913e 100644
--- a/av1/encoder/encodemv.h
+++ b/av1/encoder/encodemv.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_ENCODEMV_H_
-#define VP10_ENCODER_ENCODEMV_H_
+#ifndef AV1_ENCODER_ENCODEMV_H_
+#define AV1_ENCODER_ENCODEMV_H_
#include "av1/encoder/encoder.h"
@@ -17,24 +17,24 @@
extern "C" {
#endif
-void vp10_entropy_mv_init(void);
+void av1_entropy_mv_init(void);
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
- nmv_context_counts *const counts);
+void aom_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
+ nmv_context_counts *const counts);
-void vp10_encode_mv(VP10_COMP *cpi, vp10_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
#if CONFIG_REF_MV
- int is_compound,
+ int is_compound,
#endif
- const nmv_context *mvctx, int usehp);
+ const nmv_context *mvctx, int usehp);
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context *mvctx, int usehp);
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+ const nmv_context *mvctx, int usehp);
-void vp10_update_mv_count(ThreadData *td);
+void av1_update_mv_count(ThreadData *td);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODEMV_H_
+#endif // AV1_ENCODER_ENCODEMV_H_
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 5196d9c..619204d 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -12,7 +12,7 @@
#include <math.h>
#include <stdio.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/alloccommon.h"
#if CONFIG_CLPF
@@ -52,19 +52,19 @@
#include "av1/encoder/speed_features.h"
#include "av1/encoder/temporal_filter.h"
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
#include "aom_dsp/psnr.h"
#if CONFIG_INTERNAL_STATS
#include "aom_dsp/ssim.h"
#endif
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
#include "aom_util/debug_util.h"
#define AM_SEGMENT_ID_INACTIVE 7
@@ -96,7 +96,7 @@
FILE *keyfile;
#endif
-static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
+static INLINE void Scale2Ratio(AOM_SCALING mode, int *hr, int *hs) {
switch (mode) {
case NORMAL:
*hr = 1;
@@ -124,7 +124,7 @@
 // Mark all inactive blocks as active. Other segmentation features may be set,
 // so memset cannot be used; instead, only inactive blocks should be reset.
-static void suppress_active_map(VP10_COMP *cpi) {
+static void suppress_active_map(AV1_COMP *cpi) {
unsigned char *const seg_map = cpi->segmentation_map;
int i;
if (cpi->active_map.enabled || cpi->active_map.update)
@@ -133,7 +133,7 @@
seg_map[i] = AM_SEGMENT_ID_ACTIVE;
}
-static void apply_active_map(VP10_COMP *cpi) {
+static void apply_active_map(AV1_COMP *cpi) {
struct segmentation *const seg = &cpi->common.seg;
unsigned char *const seg_map = cpi->segmentation_map;
const unsigned char *const active_map = cpi->active_map.map;
@@ -150,16 +150,16 @@
if (cpi->active_map.enabled) {
for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
- vp10_enable_segmentation(seg);
- vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
- vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+ av1_enable_segmentation(seg);
+ av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+ av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
// Setting the data to -MAX_LOOP_FILTER will result in the computed loop
// filter level being zero regardless of the value of seg->abs_delta.
- vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
- -MAX_LOOP_FILTER);
+ av1_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+ -MAX_LOOP_FILTER);
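+    // (With either setting of seg->abs_delta, a -MAX_LOOP_FILTER value
+    // clamps the resulting filter level to zero.)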
} else {
- vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
- vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+ av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+ av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
if (seg->enabled) {
seg->update_data = 1;
seg->update_map = 1;
@@ -169,8 +169,8 @@
}
}
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
- int cols) {
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
+ int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
unsigned char *const active_map_8x8 = cpi->active_map.map;
const int mi_rows = cpi->common.mi_rows;
@@ -196,8 +196,8 @@
}
}
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
- int cols) {
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
+ int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
new_map_16x16) {
unsigned char *const seg_map_8x8 = cpi->segmentation_map;
@@ -221,7 +221,7 @@
}
}
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv) {
MACROBLOCK *const mb = &cpi->td.mb;
cpi->common.allow_high_precision_mv = allow_high_precision_mv;
@@ -250,15 +250,15 @@
#endif
}
-static BLOCK_SIZE select_sb_size(const VP10_COMP *const cpi) {
+static BLOCK_SIZE select_sb_size(const AV1_COMP *const cpi) {
#if CONFIG_EXT_PARTITION
- if (cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_64X64)
+ if (cpi->oxcf.superblock_size == AOM_SUPERBLOCK_SIZE_64X64)
return BLOCK_64X64;
- if (cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_128X128)
+ if (cpi->oxcf.superblock_size == AOM_SUPERBLOCK_SIZE_128X128)
return BLOCK_128X128;
- assert(cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_DYNAMIC);
+ assert(cpi->oxcf.superblock_size == AOM_SUPERBLOCK_SIZE_DYNAMIC);
assert(IMPLIES(cpi->common.tile_cols > 1,
cpi->common.tile_width % MAX_MIB_SIZE == 0));
@@ -273,15 +273,15 @@
#endif // CONFIG_EXT_PARTITION
}
-static void setup_frame(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void setup_frame(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
// Set up entropy context depending on frame type. The decoder mandates
// the use of the default context, index 0, for keyframes and inter
// frames where the error_resilient_mode or intra_only flag is set. For
// other inter-frames the encoder currently uses only two contexts;
// context 1 for ALTREF frames and context 0 for the others.
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
- vp10_setup_past_independence(cm);
+ av1_setup_past_independence(cm);
} else {
#if CONFIG_EXT_REFS
const GF_GROUP *gf_group = &cpi->twopass.gf_group;
@@ -307,10 +307,10 @@
if (cm->frame_type == KEY_FRAME) {
cpi->refresh_golden_frame = 1;
cpi->refresh_alt_ref_frame = 1;
- vp10_zero(cpi->interp_filter_selected);
+ av1_zero(cpi->interp_filter_selected);
} else {
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
- vp10_zero(cpi->interp_filter_selected[0]);
+ av1_zero(cpi->interp_filter_selected[0]);
}
cpi->vaq_refresh = 0;
@@ -318,7 +318,7 @@
set_sb_size(cm, select_sb_size(cpi));
}
-static void vp10_enc_setup_mi(VP10_COMMON *cm) {
+static void av1_enc_setup_mi(AV1_COMMON *cm) {
int i;
cm->mi = cm->mip + cm->mi_stride + 1;
memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
@@ -336,34 +336,34 @@
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
-static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
- cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+static int av1_enc_alloc_mi(AV1_COMMON *cm, int mi_size) {
+ cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
- cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
+ cm->prev_mip = aom_calloc(mi_size, sizeof(*cm->prev_mip));
if (!cm->prev_mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ cm->mi_grid_base = (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->mi_grid_base) return 1;
cm->prev_mi_grid_base =
- (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->prev_mi_grid_base) return 1;
return 0;
}
-static void vp10_enc_free_mi(VP10_COMMON *cm) {
- vpx_free(cm->mip);
+static void av1_enc_free_mi(AV1_COMMON *cm) {
+ aom_free(cm->mip);
cm->mip = NULL;
- vpx_free(cm->prev_mip);
+ aom_free(cm->prev_mip);
cm->prev_mip = NULL;
- vpx_free(cm->mi_grid_base);
+ aom_free(cm->mi_grid_base);
cm->mi_grid_base = NULL;
- vpx_free(cm->prev_mi_grid_base);
+ aom_free(cm->prev_mi_grid_base);
cm->prev_mi_grid_base = NULL;
}
-static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) {
+static void av1_swap_mi_and_prev_mi(AV1_COMMON *cm) {
// Current mip will be the prev_mip for the next frame.
MODE_INFO **temp_base = cm->prev_mi_grid_base;
MODE_INFO *temp = cm->prev_mip;
@@ -380,47 +380,47 @@
cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
}
-void vp10_initialize_enc(void) {
+void av1_initialize_enc(void) {
static volatile int init_done = 0;
if (!init_done) {
- vp10_rtcd();
- vpx_dsp_rtcd();
- vpx_scale_rtcd();
- vp10_init_intra_predictors();
- vp10_init_me_luts();
- vp10_rc_init_minq_luts();
- vp10_entropy_mv_init();
- vp10_encode_token_init();
+ av1_rtcd();
+ aom_dsp_rtcd();
+ aom_scale_rtcd();
+ av1_init_intra_predictors();
+ av1_init_me_luts();
+ av1_rc_init_minq_luts();
+ av1_entropy_mv_init();
+ av1_encode_token_init();
#if CONFIG_EXT_INTER
- vp10_init_wedge_masks();
+ av1_init_wedge_masks();
#endif
init_done = 1;
}
}
-static void dealloc_compressor_data(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void dealloc_compressor_data(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int i;
- vpx_free(cpi->mbmi_ext_base);
+ aom_free(cpi->mbmi_ext_base);
cpi->mbmi_ext_base = NULL;
- vpx_free(cpi->tile_data);
+ aom_free(cpi->tile_data);
cpi->tile_data = NULL;
   // Delete segmentation map
- vpx_free(cpi->segmentation_map);
+ aom_free(cpi->segmentation_map);
cpi->segmentation_map = NULL;
- vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+ aom_free(cpi->coding_context.last_frame_seg_map_copy);
cpi->coding_context.last_frame_seg_map_copy = NULL;
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
- vpx_free(cpi->nmv_costs[i][0]);
- vpx_free(cpi->nmv_costs[i][1]);
- vpx_free(cpi->nmv_costs_hp[i][0]);
- vpx_free(cpi->nmv_costs_hp[i][1]);
+ aom_free(cpi->nmv_costs[i][0]);
+ aom_free(cpi->nmv_costs[i][1]);
+ aom_free(cpi->nmv_costs_hp[i][0]);
+ aom_free(cpi->nmv_costs_hp[i][1]);
cpi->nmv_costs[i][0] = NULL;
cpi->nmv_costs[i][1] = NULL;
cpi->nmv_costs_hp[i][0] = NULL;
@@ -428,81 +428,81 @@
}
#endif
- vpx_free(cpi->nmvcosts[0]);
- vpx_free(cpi->nmvcosts[1]);
+ aom_free(cpi->nmvcosts[0]);
+ aom_free(cpi->nmvcosts[1]);
cpi->nmvcosts[0] = NULL;
cpi->nmvcosts[1] = NULL;
- vpx_free(cpi->nmvcosts_hp[0]);
- vpx_free(cpi->nmvcosts_hp[1]);
+ aom_free(cpi->nmvcosts_hp[0]);
+ aom_free(cpi->nmvcosts_hp[1]);
cpi->nmvcosts_hp[0] = NULL;
cpi->nmvcosts_hp[1] = NULL;
- vpx_free(cpi->nmvsadcosts[0]);
- vpx_free(cpi->nmvsadcosts[1]);
+ aom_free(cpi->nmvsadcosts[0]);
+ aom_free(cpi->nmvsadcosts[1]);
cpi->nmvsadcosts[0] = NULL;
cpi->nmvsadcosts[1] = NULL;
- vpx_free(cpi->nmvsadcosts_hp[0]);
- vpx_free(cpi->nmvsadcosts_hp[1]);
+ aom_free(cpi->nmvsadcosts_hp[0]);
+ aom_free(cpi->nmvsadcosts_hp[1]);
cpi->nmvsadcosts_hp[0] = NULL;
cpi->nmvsadcosts_hp[1] = NULL;
- vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+ av1_cyclic_refresh_free(cpi->cyclic_refresh);
cpi->cyclic_refresh = NULL;
- vpx_free(cpi->active_map.map);
+ aom_free(cpi->active_map.map);
cpi->active_map.map = NULL;
// Free up-sampled reference buffers.
for (i = 0; i < (REF_FRAMES + 1); i++)
- vpx_free_frame_buffer(&cpi->upsampled_ref_bufs[i].buf);
+ aom_free_frame_buffer(&cpi->upsampled_ref_bufs[i].buf);
- vp10_free_ref_frame_buffers(cm->buffer_pool);
- vp10_free_context_buffers(cm);
+ av1_free_ref_frame_buffers(cm->buffer_pool);
+ av1_free_context_buffers(cm);
- vpx_free_frame_buffer(&cpi->last_frame_uf);
+ aom_free_frame_buffer(&cpi->last_frame_uf);
#if CONFIG_LOOP_RESTORATION
- vpx_free_frame_buffer(&cpi->last_frame_db);
- vp10_free_restoration_buffers(cm);
+ aom_free_frame_buffer(&cpi->last_frame_db);
+ av1_free_restoration_buffers(cm);
#endif // CONFIG_LOOP_RESTORATION
- vpx_free_frame_buffer(&cpi->scaled_source);
- vpx_free_frame_buffer(&cpi->scaled_last_source);
- vpx_free_frame_buffer(&cpi->alt_ref_buffer);
- vp10_lookahead_destroy(cpi->lookahead);
+ aom_free_frame_buffer(&cpi->scaled_source);
+ aom_free_frame_buffer(&cpi->scaled_last_source);
+ aom_free_frame_buffer(&cpi->alt_ref_buffer);
+ av1_lookahead_destroy(cpi->lookahead);
- vpx_free(cpi->tile_tok[0][0]);
+ aom_free(cpi->tile_tok[0][0]);
cpi->tile_tok[0][0] = 0;
- vp10_free_pc_tree(&cpi->td);
- vp10_free_var_tree(&cpi->td);
+ av1_free_pc_tree(&cpi->td);
+ av1_free_var_tree(&cpi->td);
if (cpi->common.allow_screen_content_tools)
- vpx_free(cpi->td.mb.palette_buffer);
+ aom_free(cpi->td.mb.palette_buffer);
if (cpi->source_diff_var != NULL) {
- vpx_free(cpi->source_diff_var);
+ aom_free(cpi->source_diff_var);
cpi->source_diff_var = NULL;
}
#if CONFIG_ANS
- vp10_buf_ans_free(&cpi->buf_ans);
+ av1_buf_ans_free(&cpi->buf_ans);
#endif // CONFIG_ANS
}
-static void save_coding_context(VP10_COMP *cpi) {
+static void save_coding_context(AV1_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
#if CONFIG_REF_MV
int i;
#endif
// Stores a snapshot of key state variables which can subsequently be
-// restored with a call to vp10_restore_coding_context. These functions are
-// intended for use in a re-code loop in vp10_compress_frame where the
+// restored with a call to av1_restore_coding_context. These functions are
+// intended for use in a re-code loop in av1_compress_frame where the
// quantizer value is adjusted between loop iterations.
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
- vp10_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
+ av1_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
memcpy(cc->nmv_costs[i][0], cpi->nmv_costs[i][0],
MV_VALS * sizeof(*cpi->nmv_costs[i][0]));
memcpy(cc->nmv_costs[i][1], cpi->nmv_costs[i][1],
@@ -513,7 +513,7 @@
MV_VALS * sizeof(*cpi->nmv_costs_hp[i][1]));
}
#else
- vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+ av1_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
#endif
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
@@ -528,24 +528,24 @@
memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
(cm->mi_rows * cm->mi_cols));
- vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
- vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
+ av1_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
+ av1_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
cc->fc = *cm->fc;
}
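
The comment inside save_coding_context() above describes the intended usage; a hedged sketch of such a re-code loop (the helper names are illustrative, not the actual av1_compress_frame body):

save_coding_context(cpi);
do {
  set_frame_quantizer(cm, q);     /* hypothetical helper */
  encode_with_current_q(cpi);     /* hypothetical helper */
  if (!needs_recode(cpi)) break;  /* rate within tolerance */
  restore_coding_context(cpi);    /* rewind entropy and MV-cost state */
  q = adjust_quantizer(cpi, q);   /* hypothetical helper */
} while (1);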
-static void restore_coding_context(VP10_COMP *cpi) {
+static void restore_coding_context(AV1_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
#if CONFIG_REF_MV
int i;
#endif
// Restore key state variables to the snapshot state stored in the
-// previous call to vp10_save_coding_context.
+// previous call to av1_save_coding_context.
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
- vp10_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
+ av1_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
memcpy(cpi->nmv_costs[i][0], cc->nmv_costs[i][0],
MV_VALS * sizeof(*cc->nmv_costs[i][0]));
memcpy(cpi->nmv_costs[i][1], cc->nmv_costs[i][1],
@@ -556,7 +556,7 @@
MV_VALS * sizeof(*cc->nmv_costs_hp[i][1]));
}
#else
- vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
+ av1_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
#endif
memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
@@ -569,14 +569,14 @@
memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
(cm->mi_rows * cm->mi_cols));
- vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
- vp10_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
+ av1_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
+ av1_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
*cm->fc = cc->fc;
}
-static void configure_static_seg_features(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void configure_static_seg_features(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
struct segmentation *const seg = &cm->seg;
@@ -592,10 +592,10 @@
cpi->static_mb_pct = 0;
// Disable segmentation
- vp10_disable_segmentation(seg);
+ av1_disable_segmentation(seg);
// Clear down the segment features.
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
} else if (cpi->refresh_alt_ref_frame) {
// If this is an alt ref frame
// Clear down the global segmentation map
@@ -605,12 +605,12 @@
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
- vp10_disable_segmentation(seg);
- vp10_clearall_segfeatures(seg);
+ av1_disable_segmentation(seg);
+ av1_clearall_segfeatures(seg);
// Scan frames from current to arf frame.
// This function re-enables segmentation if appropriate.
- vp10_update_mbgraph_stats(cpi);
+ av1_update_mbgraph_stats(cpi);
// If segmentation was enabled set those features needed for the
// arf itself.
@@ -619,12 +619,12 @@
seg->update_data = 1;
qi_delta =
- vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+ av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
// Where relevant assume segment data is delta data
seg->abs_delta = SEGMENT_DELTADATA;
@@ -640,32 +640,32 @@
seg->update_data = 1;
seg->abs_delta = SEGMENT_DELTADATA;
- qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
- cm->bit_depth);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+ qi_delta =
+ av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
  // Segment coding disabled for compred (compound prediction) testing
if (high_q || (cpi->static_mb_pct == 100)) {
- vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
- vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
- vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+ av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+ av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
} else {
// Disable segmentation and clear down features if alt ref
// is not active for this group
- vp10_disable_segmentation(seg);
+ av1_disable_segmentation(seg);
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
}
} else if (rc->is_src_frame_alt_ref) {
// Special case where we are coding over the top of a previous
@@ -673,19 +673,19 @@
  // Segment coding disabled for compred (compound prediction) testing
// Enable ref frame features for segment 0 as well
- vp10_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
- vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+ av1_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
+ av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
  // All MBs should use ALTREF_FRAME
- vp10_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
- vp10_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
- vp10_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
- vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ av1_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
+ av1_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ av1_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
+ av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
// Skip all MBs if high Q (0,0 mv and skip coeffs)
if (high_q) {
- vp10_enable_segfeature(seg, 0, SEG_LVL_SKIP);
- vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+ av1_enable_segfeature(seg, 0, SEG_LVL_SKIP);
+ av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
// Enable data update
seg->update_data = 1;
@@ -699,8 +699,8 @@
}
}
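
On the qi_delta computations above: av1_compute_qdelta() is understood to return the q-index delta that moves rc->avg_q to the requested target, so rc->avg_q * 0.875 requests roughly 12.5% finer quantization for the ARF segment, while rc->avg_q * 1.125 requests roughly 12.5% coarser quantization for the in-between frames; the trailing -2/+2 is a small fixed bias applied to the returned index.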
-static void update_reference_segmentation_map(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void update_reference_segmentation_map(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
uint8_t *cache_ptr = cm->last_frame_seg_map;
int row, col;
@@ -715,120 +715,120 @@
}
}
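
The hunk elides the row/column walk, but the pointers declared above imply the standard cache-update pattern; a hedged reconstruction (field names assumed from the libaom MODE_INFO layout):

for (row = 0; row < cm->mi_rows; row++) {
  MODE_INFO **mi_8x8 = mi_8x8_ptr;
  uint8_t *cache = cache_ptr;
  for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
    cache[0] = mi_8x8[0]->mbmi.segment_id;
  mi_8x8_ptr += cm->mi_stride;
  cache_ptr += cm->mi_cols;
}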
-static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static void alloc_raw_frame_buffers(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
if (!cpi->lookahead)
- cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
- cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
- cm->use_highbitdepth,
+ cpi->lookahead = av1_lookahead_init(oxcf->width, oxcf->height,
+ cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_AOM_HIGHBITDEPTH
+ cm->use_highbitdepth,
#endif
- oxcf->lag_in_frames);
+ oxcf->lag_in_frames);
if (!cpi->lookahead)
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate lag buffers");
// TODO(agrange) Check if ARF is enabled and skip allocation if not.
- if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
+ if (aom_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate altref buffer");
}
-static void alloc_util_frame_buffers(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
- if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
+static void alloc_util_frame_buffers(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
+ if (aom_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
#if CONFIG_LOOP_RESTORATION
- if (vpx_realloc_frame_buffer(&cpi->last_frame_db, cm->width, cm->height,
+ if (aom_realloc_frame_buffer(&cpi->last_frame_db, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate last frame deblocked buffer");
#endif // CONFIG_LOOP_RESTORATION
- if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
+ if (aom_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled source buffer");
- if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
+ if (aom_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled last source buffer");
}
-static int alloc_context_buffers_ext(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+static int alloc_context_buffers_ext(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
int mi_size = cm->mi_cols * cm->mi_rows;
- cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
+ cpi->mbmi_ext_base = aom_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
if (!cpi->mbmi_ext_base) return 1;
return 0;
}
-void vp10_alloc_compressor_data(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+void av1_alloc_compressor_data(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
- vp10_alloc_context_buffers(cm, cm->width, cm->height);
+ av1_alloc_context_buffers(cm, cm->width, cm->height);
alloc_context_buffers_ext(cpi);
- vpx_free(cpi->tile_tok[0][0]);
+ aom_free(cpi->tile_tok[0][0]);
{
unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
- vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
+ aom_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
#if CONFIG_ANS
- vp10_buf_ans_alloc(&cpi->buf_ans, cm, tokens);
+ av1_buf_ans_alloc(&cpi->buf_ans, cm, tokens);
#endif // CONFIG_ANS
}
- vp10_setup_pc_tree(&cpi->common, &cpi->td);
+ av1_setup_pc_tree(&cpi->common, &cpi->td);
}
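
CHECK_MEM_ERROR is defined outside these hunks; it is assumed to expand along the following lines, which is why the aom_calloc() call above needs no explicit error branch:

/* Hedged sketch of the macro's shape, not its verbatim definition. */
#define CHECK_MEM_ERROR(cm, lval, expr)                     \
  do {                                                      \
    lval = (expr);                                          \
    if (!lval)                                              \
      aom_internal_error(&(cm)->error, AOM_CODEC_MEM_ERROR, \
                         "Failed to allocate " #lval);      \
  } while (0)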
-void vp10_new_framerate(VP10_COMP *cpi, double framerate) {
+void av1_new_framerate(AV1_COMP *cpi, double framerate) {
cpi->framerate = framerate < 0.1 ? 30 : framerate;
- vp10_rc_update_framerate(cpi);
+ av1_rc_update_framerate(cpi);
}
-static void set_tile_info(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_tile_info(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
#if CONFIG_EXT_TILE
#if CONFIG_EXT_PARTITION
- if (cpi->oxcf.superblock_size != VPX_SUPERBLOCK_SIZE_64X64) {
+ if (cpi->oxcf.superblock_size != AOM_SUPERBLOCK_SIZE_64X64) {
cm->tile_width = clamp(cpi->oxcf.tile_columns, 1, 32);
cm->tile_height = clamp(cpi->oxcf.tile_rows, 1, 32);
cm->tile_width <<= MAX_MIB_SIZE_LOG2;
@@ -846,8 +846,8 @@
cm->tile_height <<= MAX_MIB_SIZE_LOG2;
#endif // CONFIG_EXT_PARTITION
- cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
- cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
+ cm->tile_width = AOMMIN(cm->tile_width, cm->mi_cols);
+ cm->tile_height = AOMMIN(cm->tile_height, cm->mi_rows);
assert(cm->tile_width >> MAX_MIB_SIZE <= 32);
assert(cm->tile_height >> MAX_MIB_SIZE <= 32);
@@ -860,7 +860,7 @@
while (cm->tile_rows * cm->tile_height < cm->mi_rows) ++cm->tile_rows;
#else
int min_log2_tile_cols, max_log2_tile_cols;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
cm->log2_tile_cols =
clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
@@ -880,20 +880,20 @@
#endif // CONFIG_EXT_TILE
}
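
Worked example for the CONFIG_EXT_TILE path above (the value of MAX_MIB_SIZE_LOG2 is assumed, since its definition lies outside these hunks): with cpi->oxcf.tile_columns = 2 and MAX_MIB_SIZE_LOG2 = 3, cm->tile_width = clamp(2, 1, 32) << 3 = 16 mi units, which AOMMIN() then caps at cm->mi_cols for frames narrower than that.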
-static void update_frame_size(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void update_frame_size(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- vp10_set_mb_mi(cm, cm->width, cm->height);
- vp10_init_context_buffers(cm);
- vp10_init_macroblockd(cm, xd, NULL);
+ av1_set_mb_mi(cm, cm->width, cm->height);
+ av1_init_context_buffers(cm);
+ av1_init_macroblockd(cm, xd, NULL);
memset(cpi->mbmi_ext_base, 0,
cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
set_tile_info(cpi);
}
-static void init_buffer_indices(VP10_COMP *cpi) {
+static void init_buffer_indices(AV1_COMP *cpi) {
#if CONFIG_EXT_REFS
int fb_idx;
for (fb_idx = 0; fb_idx < LAST_REF_FRAMES; ++fb_idx)
@@ -910,15 +910,15 @@
#endif // CONFIG_EXT_REFS
}
-static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
- VP10_COMMON *const cm = &cpi->common;
+static void init_config(struct AV1_COMP *cpi, AV1EncoderConfig *oxcf) {
+ AV1_COMMON *const cm = &cpi->common;
cpi->oxcf = *oxcf;
cpi->framerate = oxcf->init_framerate;
cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = oxcf->use_highbitdepth;
#endif
cm->color_space = oxcf->color_space;
@@ -926,13 +926,13 @@
cm->width = oxcf->width;
cm->height = oxcf->height;
- vp10_alloc_compressor_data(cpi);
+ av1_alloc_compressor_data(cpi);
// Single thread case: use counts in common.
cpi->td.counts = &cm->counts;
  // av1_change_config() below covers all joint (shared) configuration
- vp10_change_config(cpi, oxcf);
+ av1_change_config(cpi, oxcf);
cpi->static_mb_pct = 0;
cpi->ref_frame_flags = 0;
@@ -941,7 +941,7 @@
}
static void set_rc_buffer_sizes(RATE_CONTROL *rc,
- const VP10EncoderConfig *oxcf) {
+ const AV1EncoderConfig *oxcf) {
const int64_t bandwidth = oxcf->target_bandwidth;
const int64_t starting = oxcf->starting_buffer_level_ms;
const int64_t optimal = oxcf->optimal_buffer_level_ms;
@@ -954,7 +954,7 @@
(maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
}
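
Worked example for the conversion above: with bandwidth = 800000 bit/s and a 6000 ms maximum, rc->maximum_buffer_size = 6000 * 800000 / 1000 = 4,800,000 bits; if the setting is 0, the fallback bandwidth / 8 = 100,000 bits corresponds to 125 ms of data.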
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
cpi->fn_ptr[BT].sdf = SDF; \
cpi->fn_ptr[BT].sdaf = SDAF; \
@@ -1064,73 +1064,73 @@
}
#if CONFIG_EXT_PARTITION
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad128x128)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad128x128_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad128x128x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad128x128x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad128x128x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad128x64)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad128x64_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad128x64x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x128)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x128_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x128x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad128x128)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad128x128_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad128x128x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad128x128x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad128x128x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad128x64)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad128x64_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad128x64x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad64x128)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad64x128_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad64x128x4d)
#endif // CONFIG_EXT_PARTITION
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad32x32x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad32x32x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad64x64x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad64x64x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x16x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x16x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x8x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x8x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x16x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x16x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x8x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x8x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x4x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x8x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad32x16)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad32x16_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad32x16x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad16x32)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad16x32_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad16x32x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad64x32)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad64x32_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad64x32x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad32x64)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad32x64_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad32x64x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad32x32)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad32x32_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad32x32x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad32x32x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad32x32x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad64x64)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad64x64_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad64x64x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad64x64x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad64x64x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad16x16)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad16x16_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad16x16x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad16x16x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad16x16x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad16x8)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad16x8_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad16x8x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad16x8x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad16x8x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad8x16)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad8x16_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad8x16x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad8x16x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad8x16x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad8x8)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad8x8_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad8x8x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad8x8x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad8x8x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad8x4)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad8x4_avg)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad8x4x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad8x4x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad4x8)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad4x8_avg)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad4x8x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad4x8x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad4x4)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad4x4_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad4x4x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad4x4x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad4x4x4d)
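
The MAKE_BFP_* macros expanded above are defined earlier in this file, outside the hunks. A hedged sketch of the single-SAD variant explains the _bits8/_bits10/_bits12 suffixes used by highbd_set_var_fns() below: the high-bit-depth SAD is computed at full precision and the wrapper shifts the result back to an 8-bit-equivalent scale.

/* Hedged sketch; the real macro may differ in detail. */
#define MAKE_BFP_SAD_WRAPPER(fnname)                                        \
  static unsigned int fnname##_bits8(const uint8_t *src, int src_stride,    \
                                     const uint8_t *ref, int ref_stride) {  \
    return fnname(src, src_stride, ref, ref_stride);                        \
  }                                                                         \
  static unsigned int fnname##_bits10(const uint8_t *src, int src_stride,   \
                                      const uint8_t *ref, int ref_stride) { \
    return fnname(src, src_stride, ref, ref_stride) >> 2;                   \
  }                                                                         \
  static unsigned int fnname##_bits12(const uint8_t *src, int src_stride,   \
                                      const uint8_t *ref, int ref_stride) { \
    return fnname(src, src_stride, ref, ref_stride) >> 4;                   \
  }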
#if CONFIG_EXT_INTER
#define HIGHBD_MBFP(BT, MSDF, MVF, MSVF) \
@@ -1158,23 +1158,23 @@
}
#if CONFIG_EXT_PARTITION
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad128x128)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad128x64)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x128)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad128x128)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad128x64)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad64x128)
#endif // CONFIG_EXT_PARTITION
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x64)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x32)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x64)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x32)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x16)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x32)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x16)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x8)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x16)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x8)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x4)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad4x8)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad4x4)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad64x64)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad64x32)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad32x64)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad32x32)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad32x16)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad16x32)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad16x16)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad16x8)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad8x16)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad8x8)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad8x4)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad4x8)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
@@ -1201,704 +1201,704 @@
}
#if CONFIG_EXT_PARTITION
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad128x128)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad128x64)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x128)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad128x128)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad128x64)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad64x128)
#endif // CONFIG_EXT_PARTITION
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x64)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x32)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x64)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x32)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x16)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x32)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x16)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x8)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x16)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x8)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x4)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad4x8)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad4x4)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad64x64)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad64x32)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad32x64)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad32x32)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad32x16)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad16x32)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad16x16)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad16x8)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad8x16)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad8x8)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad8x4)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad4x8)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad4x4)
#endif // CONFIG_OBMC
-static void highbd_set_var_fns(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void highbd_set_var_fns(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8:
- HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
- vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
- vpx_highbd_8_sub_pixel_variance32x16,
- vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
- vpx_highbd_sad32x16x4d_bits8)
+ case AOM_BITS_8:
+ HIGHBD_BFP(BLOCK_32X16, aom_highbd_sad32x16_bits8,
+ aom_highbd_sad32x16_avg_bits8, aom_highbd_8_variance32x16,
+ aom_highbd_8_sub_pixel_variance32x16,
+ aom_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
+ aom_highbd_sad32x16x4d_bits8)
- HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
- vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
- vpx_highbd_8_sub_pixel_variance16x32,
- vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
- vpx_highbd_sad16x32x4d_bits8)
+ HIGHBD_BFP(BLOCK_16X32, aom_highbd_sad16x32_bits8,
+ aom_highbd_sad16x32_avg_bits8, aom_highbd_8_variance16x32,
+ aom_highbd_8_sub_pixel_variance16x32,
+ aom_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
+ aom_highbd_sad16x32x4d_bits8)
- HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
- vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
- vpx_highbd_8_sub_pixel_variance64x32,
- vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
- vpx_highbd_sad64x32x4d_bits8)
+ HIGHBD_BFP(BLOCK_64X32, aom_highbd_sad64x32_bits8,
+ aom_highbd_sad64x32_avg_bits8, aom_highbd_8_variance64x32,
+ aom_highbd_8_sub_pixel_variance64x32,
+ aom_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
+ aom_highbd_sad64x32x4d_bits8)
- HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
- vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
- vpx_highbd_8_sub_pixel_variance32x64,
- vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
- vpx_highbd_sad32x64x4d_bits8)
+ HIGHBD_BFP(BLOCK_32X64, aom_highbd_sad32x64_bits8,
+ aom_highbd_sad32x64_avg_bits8, aom_highbd_8_variance32x64,
+ aom_highbd_8_sub_pixel_variance32x64,
+ aom_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
+ aom_highbd_sad32x64x4d_bits8)
- HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
- vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
- vpx_highbd_8_sub_pixel_variance32x32,
- vpx_highbd_8_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
- vpx_highbd_sad32x32x4d_bits8)
+ HIGHBD_BFP(BLOCK_32X32, aom_highbd_sad32x32_bits8,
+ aom_highbd_sad32x32_avg_bits8, aom_highbd_8_variance32x32,
+ aom_highbd_8_sub_pixel_variance32x32,
+ aom_highbd_8_sub_pixel_avg_variance32x32,
+ aom_highbd_sad32x32x3_bits8, aom_highbd_sad32x32x8_bits8,
+ aom_highbd_sad32x32x4d_bits8)
- HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
- vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
- vpx_highbd_8_sub_pixel_variance64x64,
- vpx_highbd_8_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
- vpx_highbd_sad64x64x4d_bits8)
+ HIGHBD_BFP(BLOCK_64X64, aom_highbd_sad64x64_bits8,
+ aom_highbd_sad64x64_avg_bits8, aom_highbd_8_variance64x64,
+ aom_highbd_8_sub_pixel_variance64x64,
+ aom_highbd_8_sub_pixel_avg_variance64x64,
+ aom_highbd_sad64x64x3_bits8, aom_highbd_sad64x64x8_bits8,
+ aom_highbd_sad64x64x4d_bits8)
- HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
- vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
- vpx_highbd_8_sub_pixel_variance16x16,
- vpx_highbd_8_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
- vpx_highbd_sad16x16x4d_bits8)
+ HIGHBD_BFP(BLOCK_16X16, aom_highbd_sad16x16_bits8,
+ aom_highbd_sad16x16_avg_bits8, aom_highbd_8_variance16x16,
+ aom_highbd_8_sub_pixel_variance16x16,
+ aom_highbd_8_sub_pixel_avg_variance16x16,
+ aom_highbd_sad16x16x3_bits8, aom_highbd_sad16x16x8_bits8,
+ aom_highbd_sad16x16x4d_bits8)
HIGHBD_BFP(
- BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
- vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
- vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
- vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
+ BLOCK_16X8, aom_highbd_sad16x8_bits8, aom_highbd_sad16x8_avg_bits8,
+ aom_highbd_8_variance16x8, aom_highbd_8_sub_pixel_variance16x8,
+ aom_highbd_8_sub_pixel_avg_variance16x8, aom_highbd_sad16x8x3_bits8,
+ aom_highbd_sad16x8x8_bits8, aom_highbd_sad16x8x4d_bits8)
HIGHBD_BFP(
- BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
- vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
- vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
- vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
+ BLOCK_8X16, aom_highbd_sad8x16_bits8, aom_highbd_sad8x16_avg_bits8,
+ aom_highbd_8_variance8x16, aom_highbd_8_sub_pixel_variance8x16,
+ aom_highbd_8_sub_pixel_avg_variance8x16, aom_highbd_sad8x16x3_bits8,
+ aom_highbd_sad8x16x8_bits8, aom_highbd_sad8x16x4d_bits8)
HIGHBD_BFP(
- BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
- vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
- vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
- vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
+ BLOCK_8X8, aom_highbd_sad8x8_bits8, aom_highbd_sad8x8_avg_bits8,
+ aom_highbd_8_variance8x8, aom_highbd_8_sub_pixel_variance8x8,
+ aom_highbd_8_sub_pixel_avg_variance8x8, aom_highbd_sad8x8x3_bits8,
+ aom_highbd_sad8x8x8_bits8, aom_highbd_sad8x8x4d_bits8)
- HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
- vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
- vpx_highbd_8_sub_pixel_variance8x4,
- vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
- vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
+ HIGHBD_BFP(BLOCK_8X4, aom_highbd_sad8x4_bits8,
+ aom_highbd_sad8x4_avg_bits8, aom_highbd_8_variance8x4,
+ aom_highbd_8_sub_pixel_variance8x4,
+ aom_highbd_8_sub_pixel_avg_variance8x4, NULL,
+ aom_highbd_sad8x4x8_bits8, aom_highbd_sad8x4x4d_bits8)
- HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
- vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
- vpx_highbd_8_sub_pixel_variance4x8,
- vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
- vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
+ HIGHBD_BFP(BLOCK_4X8, aom_highbd_sad4x8_bits8,
+ aom_highbd_sad4x8_avg_bits8, aom_highbd_8_variance4x8,
+ aom_highbd_8_sub_pixel_variance4x8,
+ aom_highbd_8_sub_pixel_avg_variance4x8, NULL,
+ aom_highbd_sad4x8x8_bits8, aom_highbd_sad4x8x4d_bits8)
HIGHBD_BFP(
- BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
- vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
- vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
- vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
+ BLOCK_4X4, aom_highbd_sad4x4_bits8, aom_highbd_sad4x4_avg_bits8,
+ aom_highbd_8_variance4x4, aom_highbd_8_sub_pixel_variance4x4,
+ aom_highbd_8_sub_pixel_avg_variance4x4, aom_highbd_sad4x4x3_bits8,
+ aom_highbd_sad4x4x8_bits8, aom_highbd_sad4x4x4d_bits8)
#if CONFIG_EXT_PARTITION
- HIGHBD_BFP(BLOCK_128X128, vpx_highbd_sad128x128_bits8,
- vpx_highbd_sad128x128_avg_bits8,
- vpx_highbd_8_variance128x128,
- vpx_highbd_8_sub_pixel_variance128x128,
- vpx_highbd_8_sub_pixel_avg_variance128x128,
- vpx_highbd_sad128x128x3_bits8, vpx_highbd_sad128x128x8_bits8,
- vpx_highbd_sad128x128x4d_bits8)
+ HIGHBD_BFP(BLOCK_128X128, aom_highbd_sad128x128_bits8,
+ aom_highbd_sad128x128_avg_bits8,
+ aom_highbd_8_variance128x128,
+ aom_highbd_8_sub_pixel_variance128x128,
+ aom_highbd_8_sub_pixel_avg_variance128x128,
+ aom_highbd_sad128x128x3_bits8, aom_highbd_sad128x128x8_bits8,
+ aom_highbd_sad128x128x4d_bits8)
- HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits8,
- vpx_highbd_sad128x64_avg_bits8, vpx_highbd_8_variance128x64,
- vpx_highbd_8_sub_pixel_variance128x64,
- vpx_highbd_8_sub_pixel_avg_variance128x64, NULL, NULL,
- vpx_highbd_sad128x64x4d_bits8)
+ HIGHBD_BFP(BLOCK_128X64, aom_highbd_sad128x64_bits8,
+ aom_highbd_sad128x64_avg_bits8, aom_highbd_8_variance128x64,
+ aom_highbd_8_sub_pixel_variance128x64,
+ aom_highbd_8_sub_pixel_avg_variance128x64, NULL, NULL,
+ aom_highbd_sad128x64x4d_bits8)
- HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits8,
- vpx_highbd_sad64x128_avg_bits8, vpx_highbd_8_variance64x128,
- vpx_highbd_8_sub_pixel_variance64x128,
- vpx_highbd_8_sub_pixel_avg_variance64x128, NULL, NULL,
- vpx_highbd_sad64x128x4d_bits8)
+ HIGHBD_BFP(BLOCK_64X128, aom_highbd_sad64x128_bits8,
+ aom_highbd_sad64x128_avg_bits8, aom_highbd_8_variance64x128,
+ aom_highbd_8_sub_pixel_variance64x128,
+ aom_highbd_8_sub_pixel_avg_variance64x128, NULL, NULL,
+ aom_highbd_sad64x128x4d_bits8)
#endif // CONFIG_EXT_PARTITION
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits8,
- vpx_highbd_masked_variance128x128,
- vpx_highbd_masked_sub_pixel_variance128x128)
- HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits8,
- vpx_highbd_masked_variance128x64,
- vpx_highbd_masked_sub_pixel_variance128x64)
- HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits8,
- vpx_highbd_masked_variance64x128,
- vpx_highbd_masked_sub_pixel_variance64x128)
+ HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits8,
+ aom_highbd_masked_variance128x128,
+ aom_highbd_masked_sub_pixel_variance128x128)
+ HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits8,
+ aom_highbd_masked_variance128x64,
+ aom_highbd_masked_sub_pixel_variance128x64)
+ HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits8,
+ aom_highbd_masked_variance64x128,
+ aom_highbd_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits8,
- vpx_highbd_masked_variance64x64,
- vpx_highbd_masked_sub_pixel_variance64x64)
- HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits8,
- vpx_highbd_masked_variance64x32,
- vpx_highbd_masked_sub_pixel_variance64x32)
- HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits8,
- vpx_highbd_masked_variance32x64,
- vpx_highbd_masked_sub_pixel_variance32x64)
- HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits8,
- vpx_highbd_masked_variance32x32,
- vpx_highbd_masked_sub_pixel_variance32x32)
- HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits8,
- vpx_highbd_masked_variance32x16,
- vpx_highbd_masked_sub_pixel_variance32x16)
- HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits8,
- vpx_highbd_masked_variance16x32,
- vpx_highbd_masked_sub_pixel_variance16x32)
- HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits8,
- vpx_highbd_masked_variance16x16,
- vpx_highbd_masked_sub_pixel_variance16x16)
- HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits8,
- vpx_highbd_masked_variance8x16,
- vpx_highbd_masked_sub_pixel_variance8x16)
- HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits8,
- vpx_highbd_masked_variance16x8,
- vpx_highbd_masked_sub_pixel_variance16x8)
- HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits8,
- vpx_highbd_masked_variance8x8,
- vpx_highbd_masked_sub_pixel_variance8x8)
- HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits8,
- vpx_highbd_masked_variance4x8,
- vpx_highbd_masked_sub_pixel_variance4x8)
- HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits8,
- vpx_highbd_masked_variance8x4,
- vpx_highbd_masked_sub_pixel_variance8x4)
- HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits8,
- vpx_highbd_masked_variance4x4,
- vpx_highbd_masked_sub_pixel_variance4x4)
+ HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits8,
+ aom_highbd_masked_variance64x64,
+ aom_highbd_masked_sub_pixel_variance64x64)
+ HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits8,
+ aom_highbd_masked_variance64x32,
+ aom_highbd_masked_sub_pixel_variance64x32)
+ HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits8,
+ aom_highbd_masked_variance32x64,
+ aom_highbd_masked_sub_pixel_variance32x64)
+ HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits8,
+ aom_highbd_masked_variance32x32,
+ aom_highbd_masked_sub_pixel_variance32x32)
+ HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits8,
+ aom_highbd_masked_variance32x16,
+ aom_highbd_masked_sub_pixel_variance32x16)
+ HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits8,
+ aom_highbd_masked_variance16x32,
+ aom_highbd_masked_sub_pixel_variance16x32)
+ HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits8,
+ aom_highbd_masked_variance16x16,
+ aom_highbd_masked_sub_pixel_variance16x16)
+ HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits8,
+ aom_highbd_masked_variance8x16,
+ aom_highbd_masked_sub_pixel_variance8x16)
+ HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits8,
+ aom_highbd_masked_variance16x8,
+ aom_highbd_masked_sub_pixel_variance16x8)
+ HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits8,
+ aom_highbd_masked_variance8x8,
+ aom_highbd_masked_sub_pixel_variance8x8)
+ HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits8,
+ aom_highbd_masked_variance4x8,
+ aom_highbd_masked_sub_pixel_variance4x8)
+ HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits8,
+ aom_highbd_masked_variance8x4,
+ aom_highbd_masked_sub_pixel_variance8x4)
+ HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits8,
+ aom_highbd_masked_variance4x4,
+ aom_highbd_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
#if CONFIG_EXT_PARTITION
- HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits8,
- vpx_highbd_obmc_variance128x128,
- vpx_highbd_obmc_sub_pixel_variance128x128)
- HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits8,
- vpx_highbd_obmc_variance128x64,
- vpx_highbd_obmc_sub_pixel_variance128x64)
- HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits8,
- vpx_highbd_obmc_variance64x128,
- vpx_highbd_obmc_sub_pixel_variance64x128)
+ HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits8,
+ aom_highbd_obmc_variance128x128,
+ aom_highbd_obmc_sub_pixel_variance128x128)
+ HIGHBD_OBFP(BLOCK_128X64, aom_highbd_obmc_sad128x64_bits8,
+ aom_highbd_obmc_variance128x64,
+ aom_highbd_obmc_sub_pixel_variance128x64)
+ HIGHBD_OBFP(BLOCK_64X128, aom_highbd_obmc_sad64x128_bits8,
+ aom_highbd_obmc_variance64x128,
+ aom_highbd_obmc_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits8,
- vpx_highbd_obmc_variance64x64,
- vpx_highbd_obmc_sub_pixel_variance64x64)
- HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits8,
- vpx_highbd_obmc_variance64x32,
- vpx_highbd_obmc_sub_pixel_variance64x32)
- HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits8,
- vpx_highbd_obmc_variance32x64,
- vpx_highbd_obmc_sub_pixel_variance32x64)
- HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits8,
- vpx_highbd_obmc_variance32x32,
- vpx_highbd_obmc_sub_pixel_variance32x32)
- HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits8,
- vpx_highbd_obmc_variance32x16,
- vpx_highbd_obmc_sub_pixel_variance32x16)
- HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits8,
- vpx_highbd_obmc_variance16x32,
- vpx_highbd_obmc_sub_pixel_variance16x32)
- HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits8,
- vpx_highbd_obmc_variance16x16,
- vpx_highbd_obmc_sub_pixel_variance16x16)
- HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits8,
- vpx_highbd_obmc_variance8x16,
- vpx_highbd_obmc_sub_pixel_variance8x16)
- HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits8,
- vpx_highbd_obmc_variance16x8,
- vpx_highbd_obmc_sub_pixel_variance16x8)
- HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits8,
- vpx_highbd_obmc_variance8x8,
- vpx_highbd_obmc_sub_pixel_variance8x8)
- HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits8,
- vpx_highbd_obmc_variance4x8,
- vpx_highbd_obmc_sub_pixel_variance4x8)
- HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits8,
- vpx_highbd_obmc_variance8x4,
- vpx_highbd_obmc_sub_pixel_variance8x4)
- HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits8,
- vpx_highbd_obmc_variance4x4,
- vpx_highbd_obmc_sub_pixel_variance4x4)
+ HIGHBD_OBFP(BLOCK_64X64, aom_highbd_obmc_sad64x64_bits8,
+ aom_highbd_obmc_variance64x64,
+ aom_highbd_obmc_sub_pixel_variance64x64)
+ HIGHBD_OBFP(BLOCK_64X32, aom_highbd_obmc_sad64x32_bits8,
+ aom_highbd_obmc_variance64x32,
+ aom_highbd_obmc_sub_pixel_variance64x32)
+ HIGHBD_OBFP(BLOCK_32X64, aom_highbd_obmc_sad32x64_bits8,
+ aom_highbd_obmc_variance32x64,
+ aom_highbd_obmc_sub_pixel_variance32x64)
+ HIGHBD_OBFP(BLOCK_32X32, aom_highbd_obmc_sad32x32_bits8,
+ aom_highbd_obmc_variance32x32,
+ aom_highbd_obmc_sub_pixel_variance32x32)
+ HIGHBD_OBFP(BLOCK_32X16, aom_highbd_obmc_sad32x16_bits8,
+ aom_highbd_obmc_variance32x16,
+ aom_highbd_obmc_sub_pixel_variance32x16)
+ HIGHBD_OBFP(BLOCK_16X32, aom_highbd_obmc_sad16x32_bits8,
+ aom_highbd_obmc_variance16x32,
+ aom_highbd_obmc_sub_pixel_variance16x32)
+ HIGHBD_OBFP(BLOCK_16X16, aom_highbd_obmc_sad16x16_bits8,
+ aom_highbd_obmc_variance16x16,
+ aom_highbd_obmc_sub_pixel_variance16x16)
+ HIGHBD_OBFP(BLOCK_8X16, aom_highbd_obmc_sad8x16_bits8,
+ aom_highbd_obmc_variance8x16,
+ aom_highbd_obmc_sub_pixel_variance8x16)
+ HIGHBD_OBFP(BLOCK_16X8, aom_highbd_obmc_sad16x8_bits8,
+ aom_highbd_obmc_variance16x8,
+ aom_highbd_obmc_sub_pixel_variance16x8)
+ HIGHBD_OBFP(BLOCK_8X8, aom_highbd_obmc_sad8x8_bits8,
+ aom_highbd_obmc_variance8x8,
+ aom_highbd_obmc_sub_pixel_variance8x8)
+ HIGHBD_OBFP(BLOCK_4X8, aom_highbd_obmc_sad4x8_bits8,
+ aom_highbd_obmc_variance4x8,
+ aom_highbd_obmc_sub_pixel_variance4x8)
+ HIGHBD_OBFP(BLOCK_8X4, aom_highbd_obmc_sad8x4_bits8,
+ aom_highbd_obmc_variance8x4,
+ aom_highbd_obmc_sub_pixel_variance8x4)
+ HIGHBD_OBFP(BLOCK_4X4, aom_highbd_obmc_sad4x4_bits8,
+ aom_highbd_obmc_variance4x4,
+ aom_highbd_obmc_sub_pixel_variance4x4)
#endif // CONFIG_OBMC
break;
- case VPX_BITS_10:
- HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
- vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
- vpx_highbd_10_sub_pixel_variance32x16,
- vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
- vpx_highbd_sad32x16x4d_bits10)
+ case AOM_BITS_10:
+ HIGHBD_BFP(BLOCK_32X16, aom_highbd_sad32x16_bits10,
+ aom_highbd_sad32x16_avg_bits10, aom_highbd_10_variance32x16,
+ aom_highbd_10_sub_pixel_variance32x16,
+ aom_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
+ aom_highbd_sad32x16x4d_bits10)
- HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
- vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
- vpx_highbd_10_sub_pixel_variance16x32,
- vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
- vpx_highbd_sad16x32x4d_bits10)
+ HIGHBD_BFP(BLOCK_16X32, aom_highbd_sad16x32_bits10,
+ aom_highbd_sad16x32_avg_bits10, aom_highbd_10_variance16x32,
+ aom_highbd_10_sub_pixel_variance16x32,
+ aom_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
+ aom_highbd_sad16x32x4d_bits10)
- HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
- vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
- vpx_highbd_10_sub_pixel_variance64x32,
- vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
- vpx_highbd_sad64x32x4d_bits10)
+ HIGHBD_BFP(BLOCK_64X32, aom_highbd_sad64x32_bits10,
+ aom_highbd_sad64x32_avg_bits10, aom_highbd_10_variance64x32,
+ aom_highbd_10_sub_pixel_variance64x32,
+ aom_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
+ aom_highbd_sad64x32x4d_bits10)
- HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
- vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
- vpx_highbd_10_sub_pixel_variance32x64,
- vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
- vpx_highbd_sad32x64x4d_bits10)
+ HIGHBD_BFP(BLOCK_32X64, aom_highbd_sad32x64_bits10,
+ aom_highbd_sad32x64_avg_bits10, aom_highbd_10_variance32x64,
+ aom_highbd_10_sub_pixel_variance32x64,
+ aom_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
+ aom_highbd_sad32x64x4d_bits10)
- HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
- vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
- vpx_highbd_10_sub_pixel_variance32x32,
- vpx_highbd_10_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
- vpx_highbd_sad32x32x4d_bits10)
+ HIGHBD_BFP(BLOCK_32X32, aom_highbd_sad32x32_bits10,
+ aom_highbd_sad32x32_avg_bits10, aom_highbd_10_variance32x32,
+ aom_highbd_10_sub_pixel_variance32x32,
+ aom_highbd_10_sub_pixel_avg_variance32x32,
+ aom_highbd_sad32x32x3_bits10, aom_highbd_sad32x32x8_bits10,
+ aom_highbd_sad32x32x4d_bits10)
- HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
- vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
- vpx_highbd_10_sub_pixel_variance64x64,
- vpx_highbd_10_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
- vpx_highbd_sad64x64x4d_bits10)
+ HIGHBD_BFP(BLOCK_64X64, aom_highbd_sad64x64_bits10,
+ aom_highbd_sad64x64_avg_bits10, aom_highbd_10_variance64x64,
+ aom_highbd_10_sub_pixel_variance64x64,
+ aom_highbd_10_sub_pixel_avg_variance64x64,
+ aom_highbd_sad64x64x3_bits10, aom_highbd_sad64x64x8_bits10,
+ aom_highbd_sad64x64x4d_bits10)
- HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
- vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
- vpx_highbd_10_sub_pixel_variance16x16,
- vpx_highbd_10_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
- vpx_highbd_sad16x16x4d_bits10)
+ HIGHBD_BFP(BLOCK_16X16, aom_highbd_sad16x16_bits10,
+ aom_highbd_sad16x16_avg_bits10, aom_highbd_10_variance16x16,
+ aom_highbd_10_sub_pixel_variance16x16,
+ aom_highbd_10_sub_pixel_avg_variance16x16,
+ aom_highbd_sad16x16x3_bits10, aom_highbd_sad16x16x8_bits10,
+ aom_highbd_sad16x16x4d_bits10)
- HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
- vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
- vpx_highbd_10_sub_pixel_variance16x8,
- vpx_highbd_10_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
- vpx_highbd_sad16x8x4d_bits10)
+ HIGHBD_BFP(BLOCK_16X8, aom_highbd_sad16x8_bits10,
+ aom_highbd_sad16x8_avg_bits10, aom_highbd_10_variance16x8,
+ aom_highbd_10_sub_pixel_variance16x8,
+ aom_highbd_10_sub_pixel_avg_variance16x8,
+ aom_highbd_sad16x8x3_bits10, aom_highbd_sad16x8x8_bits10,
+ aom_highbd_sad16x8x4d_bits10)
- HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
- vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
- vpx_highbd_10_sub_pixel_variance8x16,
- vpx_highbd_10_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
- vpx_highbd_sad8x16x4d_bits10)
+ HIGHBD_BFP(BLOCK_8X16, aom_highbd_sad8x16_bits10,
+ aom_highbd_sad8x16_avg_bits10, aom_highbd_10_variance8x16,
+ aom_highbd_10_sub_pixel_variance8x16,
+ aom_highbd_10_sub_pixel_avg_variance8x16,
+ aom_highbd_sad8x16x3_bits10, aom_highbd_sad8x16x8_bits10,
+ aom_highbd_sad8x16x4d_bits10)
HIGHBD_BFP(
- BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
- vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
- vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
- vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
+ BLOCK_8X8, aom_highbd_sad8x8_bits10, aom_highbd_sad8x8_avg_bits10,
+ aom_highbd_10_variance8x8, aom_highbd_10_sub_pixel_variance8x8,
+ aom_highbd_10_sub_pixel_avg_variance8x8, aom_highbd_sad8x8x3_bits10,
+ aom_highbd_sad8x8x8_bits10, aom_highbd_sad8x8x4d_bits10)
- HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
- vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
- vpx_highbd_10_sub_pixel_variance8x4,
- vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
- vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
+ HIGHBD_BFP(BLOCK_8X4, aom_highbd_sad8x4_bits10,
+ aom_highbd_sad8x4_avg_bits10, aom_highbd_10_variance8x4,
+ aom_highbd_10_sub_pixel_variance8x4,
+ aom_highbd_10_sub_pixel_avg_variance8x4, NULL,
+ aom_highbd_sad8x4x8_bits10, aom_highbd_sad8x4x4d_bits10)
- HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
- vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
- vpx_highbd_10_sub_pixel_variance4x8,
- vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
- vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
+ HIGHBD_BFP(BLOCK_4X8, aom_highbd_sad4x8_bits10,
+ aom_highbd_sad4x8_avg_bits10, aom_highbd_10_variance4x8,
+ aom_highbd_10_sub_pixel_variance4x8,
+ aom_highbd_10_sub_pixel_avg_variance4x8, NULL,
+ aom_highbd_sad4x8x8_bits10, aom_highbd_sad4x8x4d_bits10)
HIGHBD_BFP(
- BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
- vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
- vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
- vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
+ BLOCK_4X4, aom_highbd_sad4x4_bits10, aom_highbd_sad4x4_avg_bits10,
+ aom_highbd_10_variance4x4, aom_highbd_10_sub_pixel_variance4x4,
+ aom_highbd_10_sub_pixel_avg_variance4x4, aom_highbd_sad4x4x3_bits10,
+ aom_highbd_sad4x4x8_bits10, aom_highbd_sad4x4x4d_bits10)
#if CONFIG_EXT_PARTITION
HIGHBD_BFP(
- BLOCK_128X128, vpx_highbd_sad128x128_bits10,
- vpx_highbd_sad128x128_avg_bits10, vpx_highbd_10_variance128x128,
- vpx_highbd_10_sub_pixel_variance128x128,
- vpx_highbd_10_sub_pixel_avg_variance128x128,
- vpx_highbd_sad128x128x3_bits10, vpx_highbd_sad128x128x8_bits10,
- vpx_highbd_sad128x128x4d_bits10)
+ BLOCK_128X128, aom_highbd_sad128x128_bits10,
+ aom_highbd_sad128x128_avg_bits10, aom_highbd_10_variance128x128,
+ aom_highbd_10_sub_pixel_variance128x128,
+ aom_highbd_10_sub_pixel_avg_variance128x128,
+ aom_highbd_sad128x128x3_bits10, aom_highbd_sad128x128x8_bits10,
+ aom_highbd_sad128x128x4d_bits10)
- HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits10,
- vpx_highbd_sad128x64_avg_bits10,
- vpx_highbd_10_variance128x64,
- vpx_highbd_10_sub_pixel_variance128x64,
- vpx_highbd_10_sub_pixel_avg_variance128x64, NULL, NULL,
- vpx_highbd_sad128x64x4d_bits10)
+ HIGHBD_BFP(BLOCK_128X64, aom_highbd_sad128x64_bits10,
+ aom_highbd_sad128x64_avg_bits10,
+ aom_highbd_10_variance128x64,
+ aom_highbd_10_sub_pixel_variance128x64,
+ aom_highbd_10_sub_pixel_avg_variance128x64, NULL, NULL,
+ aom_highbd_sad128x64x4d_bits10)
- HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits10,
- vpx_highbd_sad64x128_avg_bits10,
- vpx_highbd_10_variance64x128,
- vpx_highbd_10_sub_pixel_variance64x128,
- vpx_highbd_10_sub_pixel_avg_variance64x128, NULL, NULL,
- vpx_highbd_sad64x128x4d_bits10)
+ HIGHBD_BFP(BLOCK_64X128, aom_highbd_sad64x128_bits10,
+ aom_highbd_sad64x128_avg_bits10,
+ aom_highbd_10_variance64x128,
+ aom_highbd_10_sub_pixel_variance64x128,
+ aom_highbd_10_sub_pixel_avg_variance64x128, NULL, NULL,
+ aom_highbd_sad64x128x4d_bits10)
#endif // CONFIG_EXT_PARTITION
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits10,
- vpx_highbd_10_masked_variance128x128,
- vpx_highbd_10_masked_sub_pixel_variance128x128)
- HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits10,
- vpx_highbd_10_masked_variance128x64,
- vpx_highbd_10_masked_sub_pixel_variance128x64)
- HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits10,
- vpx_highbd_10_masked_variance64x128,
- vpx_highbd_10_masked_sub_pixel_variance64x128)
+ HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits10,
+ aom_highbd_10_masked_variance128x128,
+ aom_highbd_10_masked_sub_pixel_variance128x128)
+ HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits10,
+ aom_highbd_10_masked_variance128x64,
+ aom_highbd_10_masked_sub_pixel_variance128x64)
+ HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits10,
+ aom_highbd_10_masked_variance64x128,
+ aom_highbd_10_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits10,
- vpx_highbd_10_masked_variance64x64,
- vpx_highbd_10_masked_sub_pixel_variance64x64)
- HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits10,
- vpx_highbd_10_masked_variance64x32,
- vpx_highbd_10_masked_sub_pixel_variance64x32)
- HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits10,
- vpx_highbd_10_masked_variance32x64,
- vpx_highbd_10_masked_sub_pixel_variance32x64)
- HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits10,
- vpx_highbd_10_masked_variance32x32,
- vpx_highbd_10_masked_sub_pixel_variance32x32)
- HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits10,
- vpx_highbd_10_masked_variance32x16,
- vpx_highbd_10_masked_sub_pixel_variance32x16)
- HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits10,
- vpx_highbd_10_masked_variance16x32,
- vpx_highbd_10_masked_sub_pixel_variance16x32)
- HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits10,
- vpx_highbd_10_masked_variance16x16,
- vpx_highbd_10_masked_sub_pixel_variance16x16)
- HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits10,
- vpx_highbd_10_masked_variance8x16,
- vpx_highbd_10_masked_sub_pixel_variance8x16)
- HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits10,
- vpx_highbd_10_masked_variance16x8,
- vpx_highbd_10_masked_sub_pixel_variance16x8)
- HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits10,
- vpx_highbd_10_masked_variance8x8,
- vpx_highbd_10_masked_sub_pixel_variance8x8)
- HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits10,
- vpx_highbd_10_masked_variance4x8,
- vpx_highbd_10_masked_sub_pixel_variance4x8)
- HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits10,
- vpx_highbd_10_masked_variance8x4,
- vpx_highbd_10_masked_sub_pixel_variance8x4)
- HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits10,
- vpx_highbd_10_masked_variance4x4,
- vpx_highbd_10_masked_sub_pixel_variance4x4)
+ HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits10,
+ aom_highbd_10_masked_variance64x64,
+ aom_highbd_10_masked_sub_pixel_variance64x64)
+ HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits10,
+ aom_highbd_10_masked_variance64x32,
+ aom_highbd_10_masked_sub_pixel_variance64x32)
+ HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits10,
+ aom_highbd_10_masked_variance32x64,
+ aom_highbd_10_masked_sub_pixel_variance32x64)
+ HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits10,
+ aom_highbd_10_masked_variance32x32,
+ aom_highbd_10_masked_sub_pixel_variance32x32)
+ HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits10,
+ aom_highbd_10_masked_variance32x16,
+ aom_highbd_10_masked_sub_pixel_variance32x16)
+ HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits10,
+ aom_highbd_10_masked_variance16x32,
+ aom_highbd_10_masked_sub_pixel_variance16x32)
+ HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits10,
+ aom_highbd_10_masked_variance16x16,
+ aom_highbd_10_masked_sub_pixel_variance16x16)
+ HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits10,
+ aom_highbd_10_masked_variance8x16,
+ aom_highbd_10_masked_sub_pixel_variance8x16)
+ HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits10,
+ aom_highbd_10_masked_variance16x8,
+ aom_highbd_10_masked_sub_pixel_variance16x8)
+ HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits10,
+ aom_highbd_10_masked_variance8x8,
+ aom_highbd_10_masked_sub_pixel_variance8x8)
+ HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits10,
+ aom_highbd_10_masked_variance4x8,
+ aom_highbd_10_masked_sub_pixel_variance4x8)
+ HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits10,
+ aom_highbd_10_masked_variance8x4,
+ aom_highbd_10_masked_sub_pixel_variance8x4)
+ HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits10,
+ aom_highbd_10_masked_variance4x4,
+ aom_highbd_10_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
#if CONFIG_EXT_PARTITION
- HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits10,
- vpx_highbd_10_obmc_variance128x128,
- vpx_highbd_10_obmc_sub_pixel_variance128x128)
- HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits10,
- vpx_highbd_10_obmc_variance128x64,
- vpx_highbd_10_obmc_sub_pixel_variance128x64)
- HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits10,
- vpx_highbd_10_obmc_variance64x128,
- vpx_highbd_10_obmc_sub_pixel_variance64x128)
+ HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits10,
+ aom_highbd_10_obmc_variance128x128,
+ aom_highbd_10_obmc_sub_pixel_variance128x128)
+ HIGHBD_OBFP(BLOCK_128X64, aom_highbd_obmc_sad128x64_bits10,
+ aom_highbd_10_obmc_variance128x64,
+ aom_highbd_10_obmc_sub_pixel_variance128x64)
+ HIGHBD_OBFP(BLOCK_64X128, aom_highbd_obmc_sad64x128_bits10,
+ aom_highbd_10_obmc_variance64x128,
+ aom_highbd_10_obmc_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits10,
- vpx_highbd_10_obmc_variance64x64,
- vpx_highbd_10_obmc_sub_pixel_variance64x64)
- HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits10,
- vpx_highbd_10_obmc_variance64x32,
- vpx_highbd_10_obmc_sub_pixel_variance64x32)
- HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits10,
- vpx_highbd_10_obmc_variance32x64,
- vpx_highbd_10_obmc_sub_pixel_variance32x64)
- HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits10,
- vpx_highbd_10_obmc_variance32x32,
- vpx_highbd_10_obmc_sub_pixel_variance32x32)
- HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits10,
- vpx_highbd_10_obmc_variance32x16,
- vpx_highbd_10_obmc_sub_pixel_variance32x16)
- HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits10,
- vpx_highbd_10_obmc_variance16x32,
- vpx_highbd_10_obmc_sub_pixel_variance16x32)
- HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits10,
- vpx_highbd_10_obmc_variance16x16,
- vpx_highbd_10_obmc_sub_pixel_variance16x16)
- HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits10,
- vpx_highbd_10_obmc_variance8x16,
- vpx_highbd_10_obmc_sub_pixel_variance8x16)
- HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits10,
- vpx_highbd_10_obmc_variance16x8,
- vpx_highbd_10_obmc_sub_pixel_variance16x8)
- HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits10,
- vpx_highbd_10_obmc_variance8x8,
- vpx_highbd_10_obmc_sub_pixel_variance8x8)
- HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits10,
- vpx_highbd_10_obmc_variance4x8,
- vpx_highbd_10_obmc_sub_pixel_variance4x8)
- HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits10,
- vpx_highbd_10_obmc_variance8x4,
- vpx_highbd_10_obmc_sub_pixel_variance8x4)
- HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits10,
- vpx_highbd_10_obmc_variance4x4,
- vpx_highbd_10_obmc_sub_pixel_variance4x4)
+ HIGHBD_OBFP(BLOCK_64X64, aom_highbd_obmc_sad64x64_bits10,
+ aom_highbd_10_obmc_variance64x64,
+ aom_highbd_10_obmc_sub_pixel_variance64x64)
+ HIGHBD_OBFP(BLOCK_64X32, aom_highbd_obmc_sad64x32_bits10,
+ aom_highbd_10_obmc_variance64x32,
+ aom_highbd_10_obmc_sub_pixel_variance64x32)
+ HIGHBD_OBFP(BLOCK_32X64, aom_highbd_obmc_sad32x64_bits10,
+ aom_highbd_10_obmc_variance32x64,
+ aom_highbd_10_obmc_sub_pixel_variance32x64)
+ HIGHBD_OBFP(BLOCK_32X32, aom_highbd_obmc_sad32x32_bits10,
+ aom_highbd_10_obmc_variance32x32,
+ aom_highbd_10_obmc_sub_pixel_variance32x32)
+ HIGHBD_OBFP(BLOCK_32X16, aom_highbd_obmc_sad32x16_bits10,
+ aom_highbd_10_obmc_variance32x16,
+ aom_highbd_10_obmc_sub_pixel_variance32x16)
+ HIGHBD_OBFP(BLOCK_16X32, aom_highbd_obmc_sad16x32_bits10,
+ aom_highbd_10_obmc_variance16x32,
+ aom_highbd_10_obmc_sub_pixel_variance16x32)
+ HIGHBD_OBFP(BLOCK_16X16, aom_highbd_obmc_sad16x16_bits10,
+ aom_highbd_10_obmc_variance16x16,
+ aom_highbd_10_obmc_sub_pixel_variance16x16)
+ HIGHBD_OBFP(BLOCK_8X16, aom_highbd_obmc_sad8x16_bits10,
+ aom_highbd_10_obmc_variance8x16,
+ aom_highbd_10_obmc_sub_pixel_variance8x16)
+ HIGHBD_OBFP(BLOCK_16X8, aom_highbd_obmc_sad16x8_bits10,
+ aom_highbd_10_obmc_variance16x8,
+ aom_highbd_10_obmc_sub_pixel_variance16x8)
+ HIGHBD_OBFP(BLOCK_8X8, aom_highbd_obmc_sad8x8_bits10,
+ aom_highbd_10_obmc_variance8x8,
+ aom_highbd_10_obmc_sub_pixel_variance8x8)
+ HIGHBD_OBFP(BLOCK_4X8, aom_highbd_obmc_sad4x8_bits10,
+ aom_highbd_10_obmc_variance4x8,
+ aom_highbd_10_obmc_sub_pixel_variance4x8)
+ HIGHBD_OBFP(BLOCK_8X4, aom_highbd_obmc_sad8x4_bits10,
+ aom_highbd_10_obmc_variance8x4,
+ aom_highbd_10_obmc_sub_pixel_variance8x4)
+ HIGHBD_OBFP(BLOCK_4X4, aom_highbd_obmc_sad4x4_bits10,
+ aom_highbd_10_obmc_variance4x4,
+ aom_highbd_10_obmc_sub_pixel_variance4x4)
#endif // CONFIG_OBMC
break;
- case VPX_BITS_12:
- HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
- vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
- vpx_highbd_12_sub_pixel_variance32x16,
- vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
- vpx_highbd_sad32x16x4d_bits12)
+ case AOM_BITS_12:
+ HIGHBD_BFP(BLOCK_32X16, aom_highbd_sad32x16_bits12,
+ aom_highbd_sad32x16_avg_bits12, aom_highbd_12_variance32x16,
+ aom_highbd_12_sub_pixel_variance32x16,
+ aom_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
+ aom_highbd_sad32x16x4d_bits12)
- HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
- vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
- vpx_highbd_12_sub_pixel_variance16x32,
- vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
- vpx_highbd_sad16x32x4d_bits12)
+ HIGHBD_BFP(BLOCK_16X32, aom_highbd_sad16x32_bits12,
+ aom_highbd_sad16x32_avg_bits12, aom_highbd_12_variance16x32,
+ aom_highbd_12_sub_pixel_variance16x32,
+ aom_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
+ aom_highbd_sad16x32x4d_bits12)
- HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
- vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
- vpx_highbd_12_sub_pixel_variance64x32,
- vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
- vpx_highbd_sad64x32x4d_bits12)
+ HIGHBD_BFP(BLOCK_64X32, aom_highbd_sad64x32_bits12,
+ aom_highbd_sad64x32_avg_bits12, aom_highbd_12_variance64x32,
+ aom_highbd_12_sub_pixel_variance64x32,
+ aom_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
+ aom_highbd_sad64x32x4d_bits12)
- HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
- vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
- vpx_highbd_12_sub_pixel_variance32x64,
- vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
- vpx_highbd_sad32x64x4d_bits12)
+ HIGHBD_BFP(BLOCK_32X64, aom_highbd_sad32x64_bits12,
+ aom_highbd_sad32x64_avg_bits12, aom_highbd_12_variance32x64,
+ aom_highbd_12_sub_pixel_variance32x64,
+ aom_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
+ aom_highbd_sad32x64x4d_bits12)
- HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
- vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
- vpx_highbd_12_sub_pixel_variance32x32,
- vpx_highbd_12_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
- vpx_highbd_sad32x32x4d_bits12)
+ HIGHBD_BFP(BLOCK_32X32, aom_highbd_sad32x32_bits12,
+ aom_highbd_sad32x32_avg_bits12, aom_highbd_12_variance32x32,
+ aom_highbd_12_sub_pixel_variance32x32,
+ aom_highbd_12_sub_pixel_avg_variance32x32,
+ aom_highbd_sad32x32x3_bits12, aom_highbd_sad32x32x8_bits12,
+ aom_highbd_sad32x32x4d_bits12)
- HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
- vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
- vpx_highbd_12_sub_pixel_variance64x64,
- vpx_highbd_12_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
- vpx_highbd_sad64x64x4d_bits12)
+ HIGHBD_BFP(BLOCK_64X64, aom_highbd_sad64x64_bits12,
+ aom_highbd_sad64x64_avg_bits12, aom_highbd_12_variance64x64,
+ aom_highbd_12_sub_pixel_variance64x64,
+ aom_highbd_12_sub_pixel_avg_variance64x64,
+ aom_highbd_sad64x64x3_bits12, aom_highbd_sad64x64x8_bits12,
+ aom_highbd_sad64x64x4d_bits12)
- HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
- vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
- vpx_highbd_12_sub_pixel_variance16x16,
- vpx_highbd_12_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
- vpx_highbd_sad16x16x4d_bits12)
+ HIGHBD_BFP(BLOCK_16X16, aom_highbd_sad16x16_bits12,
+ aom_highbd_sad16x16_avg_bits12, aom_highbd_12_variance16x16,
+ aom_highbd_12_sub_pixel_variance16x16,
+ aom_highbd_12_sub_pixel_avg_variance16x16,
+ aom_highbd_sad16x16x3_bits12, aom_highbd_sad16x16x8_bits12,
+ aom_highbd_sad16x16x4d_bits12)
- HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
- vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
- vpx_highbd_12_sub_pixel_variance16x8,
- vpx_highbd_12_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
- vpx_highbd_sad16x8x4d_bits12)
+ HIGHBD_BFP(BLOCK_16X8, aom_highbd_sad16x8_bits12,
+ aom_highbd_sad16x8_avg_bits12, aom_highbd_12_variance16x8,
+ aom_highbd_12_sub_pixel_variance16x8,
+ aom_highbd_12_sub_pixel_avg_variance16x8,
+ aom_highbd_sad16x8x3_bits12, aom_highbd_sad16x8x8_bits12,
+ aom_highbd_sad16x8x4d_bits12)
- HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
- vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
- vpx_highbd_12_sub_pixel_variance8x16,
- vpx_highbd_12_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
- vpx_highbd_sad8x16x4d_bits12)
+ HIGHBD_BFP(BLOCK_8X16, aom_highbd_sad8x16_bits12,
+ aom_highbd_sad8x16_avg_bits12, aom_highbd_12_variance8x16,
+ aom_highbd_12_sub_pixel_variance8x16,
+ aom_highbd_12_sub_pixel_avg_variance8x16,
+ aom_highbd_sad8x16x3_bits12, aom_highbd_sad8x16x8_bits12,
+ aom_highbd_sad8x16x4d_bits12)
HIGHBD_BFP(
- BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
- vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
- vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
- vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
+ BLOCK_8X8, aom_highbd_sad8x8_bits12, aom_highbd_sad8x8_avg_bits12,
+ aom_highbd_12_variance8x8, aom_highbd_12_sub_pixel_variance8x8,
+ aom_highbd_12_sub_pixel_avg_variance8x8, aom_highbd_sad8x8x3_bits12,
+ aom_highbd_sad8x8x8_bits12, aom_highbd_sad8x8x4d_bits12)
- HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
- vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
- vpx_highbd_12_sub_pixel_variance8x4,
- vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
- vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
+ HIGHBD_BFP(BLOCK_8X4, aom_highbd_sad8x4_bits12,
+ aom_highbd_sad8x4_avg_bits12, aom_highbd_12_variance8x4,
+ aom_highbd_12_sub_pixel_variance8x4,
+ aom_highbd_12_sub_pixel_avg_variance8x4, NULL,
+ aom_highbd_sad8x4x8_bits12, aom_highbd_sad8x4x4d_bits12)
- HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
- vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
- vpx_highbd_12_sub_pixel_variance4x8,
- vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
- vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
+ HIGHBD_BFP(BLOCK_4X8, aom_highbd_sad4x8_bits12,
+ aom_highbd_sad4x8_avg_bits12, aom_highbd_12_variance4x8,
+ aom_highbd_12_sub_pixel_variance4x8,
+ aom_highbd_12_sub_pixel_avg_variance4x8, NULL,
+ aom_highbd_sad4x8x8_bits12, aom_highbd_sad4x8x4d_bits12)
HIGHBD_BFP(
- BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
- vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
- vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
- vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
+ BLOCK_4X4, aom_highbd_sad4x4_bits12, aom_highbd_sad4x4_avg_bits12,
+ aom_highbd_12_variance4x4, aom_highbd_12_sub_pixel_variance4x4,
+ aom_highbd_12_sub_pixel_avg_variance4x4, aom_highbd_sad4x4x3_bits12,
+ aom_highbd_sad4x4x8_bits12, aom_highbd_sad4x4x4d_bits12)
#if CONFIG_EXT_PARTITION
HIGHBD_BFP(
- BLOCK_128X128, vpx_highbd_sad128x128_bits12,
- vpx_highbd_sad128x128_avg_bits12, vpx_highbd_12_variance128x128,
- vpx_highbd_12_sub_pixel_variance128x128,
- vpx_highbd_12_sub_pixel_avg_variance128x128,
- vpx_highbd_sad128x128x3_bits12, vpx_highbd_sad128x128x8_bits12,
- vpx_highbd_sad128x128x4d_bits12)
+ BLOCK_128X128, aom_highbd_sad128x128_bits12,
+ aom_highbd_sad128x128_avg_bits12, aom_highbd_12_variance128x128,
+ aom_highbd_12_sub_pixel_variance128x128,
+ aom_highbd_12_sub_pixel_avg_variance128x128,
+ aom_highbd_sad128x128x3_bits12, aom_highbd_sad128x128x8_bits12,
+ aom_highbd_sad128x128x4d_bits12)
- HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits12,
- vpx_highbd_sad128x64_avg_bits12,
- vpx_highbd_12_variance128x64,
- vpx_highbd_12_sub_pixel_variance128x64,
- vpx_highbd_12_sub_pixel_avg_variance128x64, NULL, NULL,
- vpx_highbd_sad128x64x4d_bits12)
+ HIGHBD_BFP(BLOCK_128X64, aom_highbd_sad128x64_bits12,
+ aom_highbd_sad128x64_avg_bits12,
+ aom_highbd_12_variance128x64,
+ aom_highbd_12_sub_pixel_variance128x64,
+ aom_highbd_12_sub_pixel_avg_variance128x64, NULL, NULL,
+ aom_highbd_sad128x64x4d_bits12)
- HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits12,
- vpx_highbd_sad64x128_avg_bits12,
- vpx_highbd_12_variance64x128,
- vpx_highbd_12_sub_pixel_variance64x128,
- vpx_highbd_12_sub_pixel_avg_variance64x128, NULL, NULL,
- vpx_highbd_sad64x128x4d_bits12)
+ HIGHBD_BFP(BLOCK_64X128, aom_highbd_sad64x128_bits12,
+ aom_highbd_sad64x128_avg_bits12,
+ aom_highbd_12_variance64x128,
+ aom_highbd_12_sub_pixel_variance64x128,
+ aom_highbd_12_sub_pixel_avg_variance64x128, NULL, NULL,
+ aom_highbd_sad64x128x4d_bits12)
#endif // CONFIG_EXT_PARTITION
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits12,
- vpx_highbd_12_masked_variance128x128,
- vpx_highbd_12_masked_sub_pixel_variance128x128)
- HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits12,
- vpx_highbd_12_masked_variance128x64,
- vpx_highbd_12_masked_sub_pixel_variance128x64)
- HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits12,
- vpx_highbd_12_masked_variance64x128,
- vpx_highbd_12_masked_sub_pixel_variance64x128)
+ HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits12,
+ aom_highbd_12_masked_variance128x128,
+ aom_highbd_12_masked_sub_pixel_variance128x128)
+ HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits12,
+ aom_highbd_12_masked_variance128x64,
+ aom_highbd_12_masked_sub_pixel_variance128x64)
+ HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits12,
+ aom_highbd_12_masked_variance64x128,
+ aom_highbd_12_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits12,
- vpx_highbd_12_masked_variance64x64,
- vpx_highbd_12_masked_sub_pixel_variance64x64)
- HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits12,
- vpx_highbd_12_masked_variance64x32,
- vpx_highbd_12_masked_sub_pixel_variance64x32)
- HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits12,
- vpx_highbd_12_masked_variance32x64,
- vpx_highbd_12_masked_sub_pixel_variance32x64)
- HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits12,
- vpx_highbd_12_masked_variance32x32,
- vpx_highbd_12_masked_sub_pixel_variance32x32)
- HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits12,
- vpx_highbd_12_masked_variance32x16,
- vpx_highbd_12_masked_sub_pixel_variance32x16)
- HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits12,
- vpx_highbd_12_masked_variance16x32,
- vpx_highbd_12_masked_sub_pixel_variance16x32)
- HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits12,
- vpx_highbd_12_masked_variance16x16,
- vpx_highbd_12_masked_sub_pixel_variance16x16)
- HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits12,
- vpx_highbd_12_masked_variance8x16,
- vpx_highbd_12_masked_sub_pixel_variance8x16)
- HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits12,
- vpx_highbd_12_masked_variance16x8,
- vpx_highbd_12_masked_sub_pixel_variance16x8)
- HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits12,
- vpx_highbd_12_masked_variance8x8,
- vpx_highbd_12_masked_sub_pixel_variance8x8)
- HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits12,
- vpx_highbd_12_masked_variance4x8,
- vpx_highbd_12_masked_sub_pixel_variance4x8)
- HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits12,
- vpx_highbd_12_masked_variance8x4,
- vpx_highbd_12_masked_sub_pixel_variance8x4)
- HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits12,
- vpx_highbd_12_masked_variance4x4,
- vpx_highbd_12_masked_sub_pixel_variance4x4)
+ HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits12,
+ aom_highbd_12_masked_variance64x64,
+ aom_highbd_12_masked_sub_pixel_variance64x64)
+ HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits12,
+ aom_highbd_12_masked_variance64x32,
+ aom_highbd_12_masked_sub_pixel_variance64x32)
+ HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits12,
+ aom_highbd_12_masked_variance32x64,
+ aom_highbd_12_masked_sub_pixel_variance32x64)
+ HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits12,
+ aom_highbd_12_masked_variance32x32,
+ aom_highbd_12_masked_sub_pixel_variance32x32)
+ HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits12,
+ aom_highbd_12_masked_variance32x16,
+ aom_highbd_12_masked_sub_pixel_variance32x16)
+ HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits12,
+ aom_highbd_12_masked_variance16x32,
+ aom_highbd_12_masked_sub_pixel_variance16x32)
+ HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits12,
+ aom_highbd_12_masked_variance16x16,
+ aom_highbd_12_masked_sub_pixel_variance16x16)
+ HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits12,
+ aom_highbd_12_masked_variance8x16,
+ aom_highbd_12_masked_sub_pixel_variance8x16)
+ HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits12,
+ aom_highbd_12_masked_variance16x8,
+ aom_highbd_12_masked_sub_pixel_variance16x8)
+ HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits12,
+ aom_highbd_12_masked_variance8x8,
+ aom_highbd_12_masked_sub_pixel_variance8x8)
+ HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits12,
+ aom_highbd_12_masked_variance4x8,
+ aom_highbd_12_masked_sub_pixel_variance4x8)
+ HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits12,
+ aom_highbd_12_masked_variance8x4,
+ aom_highbd_12_masked_sub_pixel_variance8x4)
+ HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits12,
+ aom_highbd_12_masked_variance4x4,
+ aom_highbd_12_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
#if CONFIG_EXT_PARTITION
- HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits12,
- vpx_highbd_12_obmc_variance128x128,
- vpx_highbd_12_obmc_sub_pixel_variance128x128)
- HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits12,
- vpx_highbd_12_obmc_variance128x64,
- vpx_highbd_12_obmc_sub_pixel_variance128x64)
- HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits12,
- vpx_highbd_12_obmc_variance64x128,
- vpx_highbd_12_obmc_sub_pixel_variance64x128)
+ HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits12,
+ aom_highbd_12_obmc_variance128x128,
+ aom_highbd_12_obmc_sub_pixel_variance128x128)
+ HIGHBD_OBFP(BLOCK_128X64, aom_highbd_obmc_sad128x64_bits12,
+ aom_highbd_12_obmc_variance128x64,
+ aom_highbd_12_obmc_sub_pixel_variance128x64)
+ HIGHBD_OBFP(BLOCK_64X128, aom_highbd_obmc_sad64x128_bits12,
+ aom_highbd_12_obmc_variance64x128,
+ aom_highbd_12_obmc_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits12,
- vpx_highbd_12_obmc_variance64x64,
- vpx_highbd_12_obmc_sub_pixel_variance64x64)
- HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits12,
- vpx_highbd_12_obmc_variance64x32,
- vpx_highbd_12_obmc_sub_pixel_variance64x32)
- HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits12,
- vpx_highbd_12_obmc_variance32x64,
- vpx_highbd_12_obmc_sub_pixel_variance32x64)
- HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits12,
- vpx_highbd_12_obmc_variance32x32,
- vpx_highbd_12_obmc_sub_pixel_variance32x32)
- HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits12,
- vpx_highbd_12_obmc_variance32x16,
- vpx_highbd_12_obmc_sub_pixel_variance32x16)
- HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits12,
- vpx_highbd_12_obmc_variance16x32,
- vpx_highbd_12_obmc_sub_pixel_variance16x32)
- HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits12,
- vpx_highbd_12_obmc_variance16x16,
- vpx_highbd_12_obmc_sub_pixel_variance16x16)
- HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits12,
- vpx_highbd_12_obmc_variance8x16,
- vpx_highbd_12_obmc_sub_pixel_variance8x16)
- HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits12,
- vpx_highbd_12_obmc_variance16x8,
- vpx_highbd_12_obmc_sub_pixel_variance16x8)
- HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits12,
- vpx_highbd_12_obmc_variance8x8,
- vpx_highbd_12_obmc_sub_pixel_variance8x8)
- HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits12,
- vpx_highbd_12_obmc_variance4x8,
- vpx_highbd_12_obmc_sub_pixel_variance4x8)
- HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits12,
- vpx_highbd_12_obmc_variance8x4,
- vpx_highbd_12_obmc_sub_pixel_variance8x4)
- HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits12,
- vpx_highbd_12_obmc_variance4x4,
- vpx_highbd_12_obmc_sub_pixel_variance4x4)
+ HIGHBD_OBFP(BLOCK_64X64, aom_highbd_obmc_sad64x64_bits12,
+ aom_highbd_12_obmc_variance64x64,
+ aom_highbd_12_obmc_sub_pixel_variance64x64)
+ HIGHBD_OBFP(BLOCK_64X32, aom_highbd_obmc_sad64x32_bits12,
+ aom_highbd_12_obmc_variance64x32,
+ aom_highbd_12_obmc_sub_pixel_variance64x32)
+ HIGHBD_OBFP(BLOCK_32X64, aom_highbd_obmc_sad32x64_bits12,
+ aom_highbd_12_obmc_variance32x64,
+ aom_highbd_12_obmc_sub_pixel_variance32x64)
+ HIGHBD_OBFP(BLOCK_32X32, aom_highbd_obmc_sad32x32_bits12,
+ aom_highbd_12_obmc_variance32x32,
+ aom_highbd_12_obmc_sub_pixel_variance32x32)
+ HIGHBD_OBFP(BLOCK_32X16, aom_highbd_obmc_sad32x16_bits12,
+ aom_highbd_12_obmc_variance32x16,
+ aom_highbd_12_obmc_sub_pixel_variance32x16)
+ HIGHBD_OBFP(BLOCK_16X32, aom_highbd_obmc_sad16x32_bits12,
+ aom_highbd_12_obmc_variance16x32,
+ aom_highbd_12_obmc_sub_pixel_variance16x32)
+ HIGHBD_OBFP(BLOCK_16X16, aom_highbd_obmc_sad16x16_bits12,
+ aom_highbd_12_obmc_variance16x16,
+ aom_highbd_12_obmc_sub_pixel_variance16x16)
+ HIGHBD_OBFP(BLOCK_8X16, aom_highbd_obmc_sad8x16_bits12,
+ aom_highbd_12_obmc_variance8x16,
+ aom_highbd_12_obmc_sub_pixel_variance8x16)
+ HIGHBD_OBFP(BLOCK_16X8, aom_highbd_obmc_sad16x8_bits12,
+ aom_highbd_12_obmc_variance16x8,
+ aom_highbd_12_obmc_sub_pixel_variance16x8)
+ HIGHBD_OBFP(BLOCK_8X8, aom_highbd_obmc_sad8x8_bits12,
+ aom_highbd_12_obmc_variance8x8,
+ aom_highbd_12_obmc_sub_pixel_variance8x8)
+ HIGHBD_OBFP(BLOCK_4X8, aom_highbd_obmc_sad4x8_bits12,
+ aom_highbd_12_obmc_variance4x8,
+ aom_highbd_12_obmc_sub_pixel_variance4x8)
+ HIGHBD_OBFP(BLOCK_8X4, aom_highbd_obmc_sad8x4_bits12,
+ aom_highbd_12_obmc_variance8x4,
+ aom_highbd_12_obmc_sub_pixel_variance8x4)
+ HIGHBD_OBFP(BLOCK_4X4, aom_highbd_obmc_sad4x4_bits12,
+ aom_highbd_12_obmc_variance4x4,
+ aom_highbd_12_obmc_sub_pixel_variance4x4)
#endif // CONFIG_OBMC
break;
default:
assert(0 &&
- "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ "cm->bit_depth should be AOM_BITS_8, "
+ "AOM_BITS_10 or AOM_BITS_12");
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static void realloc_segmentation_maps(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void realloc_segmentation_maps(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
// Create the encoder segmentation map and set all entries to 0
- vpx_free(cpi->segmentation_map);
+ aom_free(cpi->segmentation_map);
CHECK_MEM_ERROR(cm, cpi->segmentation_map,
- vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+ aom_calloc(cm->mi_rows * cm->mi_cols, 1));
// Create a map used for cyclic background refresh.
- if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+ if (cpi->cyclic_refresh) av1_cyclic_refresh_free(cpi->cyclic_refresh);
CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
- vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
+ av1_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
// Create a map used to mark inactive areas.
- vpx_free(cpi->active_map.map);
+ aom_free(cpi->active_map.map);
CHECK_MEM_ERROR(cm, cpi->active_map.map,
- vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+ aom_calloc(cm->mi_rows * cm->mi_cols, 1));
// And a placeholder structure for the coding context,
// for use if we want to save and restore it
- vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+ aom_free(cpi->coding_context.last_frame_seg_map_copy);
CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
- vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+ aom_calloc(cm->mi_rows * cm->mi_cols, 1));
}
-void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
@@ -1907,19 +1907,19 @@
cm->color_range = oxcf->color_range;
if (cm->profile <= PROFILE_1)
- assert(cm->bit_depth == VPX_BITS_8);
+ assert(cm->bit_depth == AOM_BITS_8);
else
- assert(cm->bit_depth > VPX_BITS_8);
+ assert(cm->bit_depth > AOM_BITS_8);
cpi->oxcf = *oxcf;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_GLOBAL_MOTION
cpi->td.mb.e_mbd.global_motion = cm->global_motion;
#endif // CONFIG_GLOBAL_MOTION
- if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
+ if ((oxcf->pass == 0) && (oxcf->rc_mode == AOM_Q)) {
rc->baseline_gf_interval = FIXED_GF_INTERVAL;
} else {
rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2;
@@ -1937,21 +1937,21 @@
: REFRESH_FRAME_CONTEXT_BACKWARD;
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
- cm->allow_screen_content_tools = (cpi->oxcf.content == VPX_CONTENT_SCREEN);
+ cm->allow_screen_content_tools = (cpi->oxcf.content == AOM_CONTENT_SCREEN);
if (cm->allow_screen_content_tools) {
MACROBLOCK *x = &cpi->td.mb;
if (x->palette_buffer == 0) {
CHECK_MEM_ERROR(cm, x->palette_buffer,
- vpx_memalign(16, sizeof(*x->palette_buffer)));
+ aom_memalign(16, sizeof(*x->palette_buffer)));
}
// Reallocate the pc_tree, as its contents depend on
// the state of cm->allow_screen_content_tools
- vp10_free_pc_tree(&cpi->td);
- vp10_setup_pc_tree(&cpi->common, &cpi->td);
+ av1_free_pc_tree(&cpi->td);
+ av1_setup_pc_tree(&cpi->common, &cpi->td);
}
- vp10_reset_segment_features(cm);
- vp10_set_high_precision_mv(cpi, 0);
+ av1_reset_segment_features(cm);
+ av1_set_high_precision_mv(cpi, 0);
{
int i;
@@ -1965,11 +1965,11 @@
// Under a configuration change, where maximum_buffer_size may change,
// keep buffer level clipped to the maximum allowed buffer size.
- rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
- rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
+ rc->bits_off_target = AOMMIN(rc->bits_off_target, rc->maximum_buffer_size);
+ rc->buffer_level = AOMMIN(rc->buffer_level, rc->maximum_buffer_size);
// Set up frame rate and related rate control parameter values.
- vp10_new_framerate(cpi, cpi->framerate);
+ av1_new_framerate(cpi, cpi->framerate);
// Set absolute upper and lower quality limits
rc->worst_quality = cpi->oxcf.worst_allowed_q;
@@ -1989,8 +1989,8 @@
if (cpi->initial_width) {
if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) {
- vp10_free_context_buffers(cm);
- vp10_alloc_compressor_data(cpi);
+ av1_free_context_buffers(cm);
+ av1_alloc_compressor_data(cpi);
realloc_segmentation_maps(cpi);
cpi->initial_width = cpi->initial_height = 0;
}
@@ -2017,7 +2017,7 @@
cpi->ext_refresh_frame_flags_pending = 0;
cpi->ext_refresh_frame_context_pending = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
}
@@ -2066,7 +2066,7 @@
} while (++i <= MV_MAX);
}
-static INLINE void init_upsampled_ref_frame_bufs(VP10_COMP *cpi) {
+static INLINE void init_upsampled_ref_frame_bufs(AV1_COMP *cpi) {
int i;
for (i = 0; i < (REF_FRAMES + 1); ++i) {
@@ -2075,31 +2075,31 @@
}
}
-VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
- BufferPool *const pool) {
+AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
+ BufferPool *const pool) {
unsigned int i;
- VP10_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP10_COMP));
- VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
+ AV1_COMP *volatile const cpi = aom_memalign(32, sizeof(AV1_COMP));
+ AV1_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
if (!cm) return NULL;
- vp10_zero(*cpi);
+ av1_zero(*cpi);
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
- vp10_remove_compressor(cpi);
+ av1_remove_compressor(cpi);
return 0;
}
cm->error.setjmp = 1;
- cm->alloc_mi = vp10_enc_alloc_mi;
- cm->free_mi = vp10_enc_free_mi;
- cm->setup_mi = vp10_enc_setup_mi;
+ cm->alloc_mi = av1_enc_alloc_mi;
+ cm->free_mi = av1_enc_free_mi;
+ cm->setup_mi = av1_enc_setup_mi;
- CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(
cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+ (FRAME_CONTEXT *)aom_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
cpi->resize_state = 0;
cpi->resize_avg_qp = 0;
@@ -2107,7 +2107,7 @@
cpi->common.buffer_pool = pool;
init_config(cpi, oxcf);
- vp10_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
+ av1_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
cm->current_video_frame = 0;
cpi->partition_search_skippable_frame = 0;
@@ -2119,38 +2119,38 @@
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][0],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][1],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][0],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][1],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
}
#endif
CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
- vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
+ aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
i++) {
CHECK_MEM_ERROR(
cm, cpi->mbgraph_stats[i].mb_stats,
- vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+ aom_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
}
#if CONFIG_FP_MB_STATS
@@ -2158,7 +2158,7 @@
if (cpi->use_fp_mb_stats) {
// a placeholder used to store the first-pass mb stats in the first pass
CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf,
- vpx_calloc(cm->MBs * sizeof(uint8_t), 1));
+ aom_calloc(cm->MBs * sizeof(uint8_t), 1));
} else {
cpi->twopass.frame_mb_stats_buf = NULL;
}
@@ -2196,7 +2196,7 @@
if (cpi->b_calculate_consistency) {
CHECK_MEM_ERROR(cm, cpi->ssim_vars,
- vpx_malloc(sizeof(*cpi->ssim_vars) * 4 *
+ aom_malloc(sizeof(*cpi->ssim_vars) * 4 *
cpi->common.mi_rows * cpi->common.mi_cols));
cpi->worst_consistency = 100.0;
}
@@ -2241,7 +2241,7 @@
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
if (oxcf->pass == 1) {
- vp10_init_first_pass(cpi);
+ av1_init_first_pass(cpi);
} else if (oxcf->pass == 2) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
@@ -2263,16 +2263,16 @@
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
- vp10_init_second_pass(cpi);
+ av1_init_second_pass(cpi);
}
init_upsampled_ref_frame_bufs(cpi);
- vp10_set_speed_features_framesize_independent(cpi);
- vp10_set_speed_features_framesize_dependent(cpi);
+ av1_set_speed_features_framesize_independent(cpi);
+ av1_set_speed_features_framesize_dependent(cpi);
// Allocate memory to store variances for a frame.
- CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
+ CHECK_MEM_ERROR(cm, cpi->source_diff_var, aom_calloc(cm->MBs, sizeof(diff)));
cpi->source_var_thresh = 0;
cpi->frames_till_next_var_check = 0;
@@ -2287,70 +2287,70 @@
cpi->fn_ptr[BT].sdx4df = SDX4DF;
#if CONFIG_EXT_PARTITION
- BFP(BLOCK_128X128, vpx_sad128x128, vpx_sad128x128_avg, vpx_variance128x128,
- vpx_sub_pixel_variance128x128, vpx_sub_pixel_avg_variance128x128,
- vpx_sad128x128x3, vpx_sad128x128x8, vpx_sad128x128x4d)
+ BFP(BLOCK_128X128, aom_sad128x128, aom_sad128x128_avg, aom_variance128x128,
+ aom_sub_pixel_variance128x128, aom_sub_pixel_avg_variance128x128,
+ aom_sad128x128x3, aom_sad128x128x8, aom_sad128x128x4d)
- BFP(BLOCK_128X64, vpx_sad128x64, vpx_sad128x64_avg, vpx_variance128x64,
- vpx_sub_pixel_variance128x64, vpx_sub_pixel_avg_variance128x64, NULL,
- NULL, vpx_sad128x64x4d)
+ BFP(BLOCK_128X64, aom_sad128x64, aom_sad128x64_avg, aom_variance128x64,
+ aom_sub_pixel_variance128x64, aom_sub_pixel_avg_variance128x64, NULL,
+ NULL, aom_sad128x64x4d)
- BFP(BLOCK_64X128, vpx_sad64x128, vpx_sad64x128_avg, vpx_variance64x128,
- vpx_sub_pixel_variance64x128, vpx_sub_pixel_avg_variance64x128, NULL,
- NULL, vpx_sad64x128x4d)
+ BFP(BLOCK_64X128, aom_sad64x128, aom_sad64x128_avg, aom_variance64x128,
+ aom_sub_pixel_variance64x128, aom_sub_pixel_avg_variance64x128, NULL,
+ NULL, aom_sad64x128x4d)
#endif // CONFIG_EXT_PARTITION
- BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
- vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
- vpx_sad32x16x4d)
+ BFP(BLOCK_32X16, aom_sad32x16, aom_sad32x16_avg, aom_variance32x16,
+ aom_sub_pixel_variance32x16, aom_sub_pixel_avg_variance32x16, NULL, NULL,
+ aom_sad32x16x4d)
- BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
- vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
- vpx_sad16x32x4d)
+ BFP(BLOCK_16X32, aom_sad16x32, aom_sad16x32_avg, aom_variance16x32,
+ aom_sub_pixel_variance16x32, aom_sub_pixel_avg_variance16x32, NULL, NULL,
+ aom_sad16x32x4d)
- BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
- vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
- vpx_sad64x32x4d)
+ BFP(BLOCK_64X32, aom_sad64x32, aom_sad64x32_avg, aom_variance64x32,
+ aom_sub_pixel_variance64x32, aom_sub_pixel_avg_variance64x32, NULL, NULL,
+ aom_sad64x32x4d)
- BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
- vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
- vpx_sad32x64x4d)
+ BFP(BLOCK_32X64, aom_sad32x64, aom_sad32x64_avg, aom_variance32x64,
+ aom_sub_pixel_variance32x64, aom_sub_pixel_avg_variance32x64, NULL, NULL,
+ aom_sad32x64x4d)
- BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
- vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
- vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
+ BFP(BLOCK_32X32, aom_sad32x32, aom_sad32x32_avg, aom_variance32x32,
+ aom_sub_pixel_variance32x32, aom_sub_pixel_avg_variance32x32,
+ aom_sad32x32x3, aom_sad32x32x8, aom_sad32x32x4d)
- BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
- vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
- vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
+ BFP(BLOCK_64X64, aom_sad64x64, aom_sad64x64_avg, aom_variance64x64,
+ aom_sub_pixel_variance64x64, aom_sub_pixel_avg_variance64x64,
+ aom_sad64x64x3, aom_sad64x64x8, aom_sad64x64x4d)
- BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
- vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
- vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
+ BFP(BLOCK_16X16, aom_sad16x16, aom_sad16x16_avg, aom_variance16x16,
+ aom_sub_pixel_variance16x16, aom_sub_pixel_avg_variance16x16,
+ aom_sad16x16x3, aom_sad16x16x8, aom_sad16x16x4d)
- BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
- vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
- vpx_sad16x8x8, vpx_sad16x8x4d)
+ BFP(BLOCK_16X8, aom_sad16x8, aom_sad16x8_avg, aom_variance16x8,
+ aom_sub_pixel_variance16x8, aom_sub_pixel_avg_variance16x8, aom_sad16x8x3,
+ aom_sad16x8x8, aom_sad16x8x4d)
- BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
- vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
- vpx_sad8x16x8, vpx_sad8x16x4d)
+ BFP(BLOCK_8X16, aom_sad8x16, aom_sad8x16_avg, aom_variance8x16,
+ aom_sub_pixel_variance8x16, aom_sub_pixel_avg_variance8x16, aom_sad8x16x3,
+ aom_sad8x16x8, aom_sad8x16x4d)
- BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
- vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
- vpx_sad8x8x8, vpx_sad8x8x4d)
+ BFP(BLOCK_8X8, aom_sad8x8, aom_sad8x8_avg, aom_variance8x8,
+ aom_sub_pixel_variance8x8, aom_sub_pixel_avg_variance8x8, aom_sad8x8x3,
+ aom_sad8x8x8, aom_sad8x8x4d)
- BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
- vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
- vpx_sad8x4x8, vpx_sad8x4x4d)
+ BFP(BLOCK_8X4, aom_sad8x4, aom_sad8x4_avg, aom_variance8x4,
+ aom_sub_pixel_variance8x4, aom_sub_pixel_avg_variance8x4, NULL,
+ aom_sad8x4x8, aom_sad8x4x4d)
- BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
- vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
- vpx_sad4x8x8, vpx_sad4x8x4d)
+ BFP(BLOCK_4X8, aom_sad4x8, aom_sad4x8_avg, aom_variance4x8,
+ aom_sub_pixel_variance4x8, aom_sub_pixel_avg_variance4x8, NULL,
+ aom_sad4x8x8, aom_sad4x8x4d)
- BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
- vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
- vpx_sad4x4x8, vpx_sad4x4x4d)
+ BFP(BLOCK_4X4, aom_sad4x4, aom_sad4x4_avg, aom_variance4x4,
+ aom_sub_pixel_variance4x4, aom_sub_pixel_avg_variance4x4, aom_sad4x4x3,
+ aom_sad4x4x8, aom_sad4x4x4d)
#if CONFIG_OBMC
#define OBFP(BT, OSDF, OVF, OSVF) \
@@ -2359,39 +2359,39 @@
cpi->fn_ptr[BT].osvf = OSVF;
#if CONFIG_EXT_PARTITION
- OBFP(BLOCK_128X128, vpx_obmc_sad128x128, vpx_obmc_variance128x128,
- vpx_obmc_sub_pixel_variance128x128)
- OBFP(BLOCK_128X64, vpx_obmc_sad128x64, vpx_obmc_variance128x64,
- vpx_obmc_sub_pixel_variance128x64)
- OBFP(BLOCK_64X128, vpx_obmc_sad64x128, vpx_obmc_variance64x128,
- vpx_obmc_sub_pixel_variance64x128)
+ OBFP(BLOCK_128X128, aom_obmc_sad128x128, aom_obmc_variance128x128,
+ aom_obmc_sub_pixel_variance128x128)
+ OBFP(BLOCK_128X64, aom_obmc_sad128x64, aom_obmc_variance128x64,
+ aom_obmc_sub_pixel_variance128x64)
+ OBFP(BLOCK_64X128, aom_obmc_sad64x128, aom_obmc_variance64x128,
+ aom_obmc_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- OBFP(BLOCK_64X64, vpx_obmc_sad64x64, vpx_obmc_variance64x64,
- vpx_obmc_sub_pixel_variance64x64)
- OBFP(BLOCK_64X32, vpx_obmc_sad64x32, vpx_obmc_variance64x32,
- vpx_obmc_sub_pixel_variance64x32)
- OBFP(BLOCK_32X64, vpx_obmc_sad32x64, vpx_obmc_variance32x64,
- vpx_obmc_sub_pixel_variance32x64)
- OBFP(BLOCK_32X32, vpx_obmc_sad32x32, vpx_obmc_variance32x32,
- vpx_obmc_sub_pixel_variance32x32)
- OBFP(BLOCK_32X16, vpx_obmc_sad32x16, vpx_obmc_variance32x16,
- vpx_obmc_sub_pixel_variance32x16)
- OBFP(BLOCK_16X32, vpx_obmc_sad16x32, vpx_obmc_variance16x32,
- vpx_obmc_sub_pixel_variance16x32)
- OBFP(BLOCK_16X16, vpx_obmc_sad16x16, vpx_obmc_variance16x16,
- vpx_obmc_sub_pixel_variance16x16)
- OBFP(BLOCK_16X8, vpx_obmc_sad16x8, vpx_obmc_variance16x8,
- vpx_obmc_sub_pixel_variance16x8)
- OBFP(BLOCK_8X16, vpx_obmc_sad8x16, vpx_obmc_variance8x16,
- vpx_obmc_sub_pixel_variance8x16)
- OBFP(BLOCK_8X8, vpx_obmc_sad8x8, vpx_obmc_variance8x8,
- vpx_obmc_sub_pixel_variance8x8)
- OBFP(BLOCK_4X8, vpx_obmc_sad4x8, vpx_obmc_variance4x8,
- vpx_obmc_sub_pixel_variance4x8)
- OBFP(BLOCK_8X4, vpx_obmc_sad8x4, vpx_obmc_variance8x4,
- vpx_obmc_sub_pixel_variance8x4)
- OBFP(BLOCK_4X4, vpx_obmc_sad4x4, vpx_obmc_variance4x4,
- vpx_obmc_sub_pixel_variance4x4)
+ OBFP(BLOCK_64X64, aom_obmc_sad64x64, aom_obmc_variance64x64,
+ aom_obmc_sub_pixel_variance64x64)
+ OBFP(BLOCK_64X32, aom_obmc_sad64x32, aom_obmc_variance64x32,
+ aom_obmc_sub_pixel_variance64x32)
+ OBFP(BLOCK_32X64, aom_obmc_sad32x64, aom_obmc_variance32x64,
+ aom_obmc_sub_pixel_variance32x64)
+ OBFP(BLOCK_32X32, aom_obmc_sad32x32, aom_obmc_variance32x32,
+ aom_obmc_sub_pixel_variance32x32)
+ OBFP(BLOCK_32X16, aom_obmc_sad32x16, aom_obmc_variance32x16,
+ aom_obmc_sub_pixel_variance32x16)
+ OBFP(BLOCK_16X32, aom_obmc_sad16x32, aom_obmc_variance16x32,
+ aom_obmc_sub_pixel_variance16x32)
+ OBFP(BLOCK_16X16, aom_obmc_sad16x16, aom_obmc_variance16x16,
+ aom_obmc_sub_pixel_variance16x16)
+ OBFP(BLOCK_16X8, aom_obmc_sad16x8, aom_obmc_variance16x8,
+ aom_obmc_sub_pixel_variance16x8)
+ OBFP(BLOCK_8X16, aom_obmc_sad8x16, aom_obmc_variance8x16,
+ aom_obmc_sub_pixel_variance8x16)
+ OBFP(BLOCK_8X8, aom_obmc_sad8x8, aom_obmc_variance8x8,
+ aom_obmc_sub_pixel_variance8x8)
+ OBFP(BLOCK_4X8, aom_obmc_sad4x8, aom_obmc_variance4x8,
+ aom_obmc_sub_pixel_variance4x8)
+ OBFP(BLOCK_8X4, aom_obmc_sad8x4, aom_obmc_variance8x4,
+ aom_obmc_sub_pixel_variance8x4)
+ OBFP(BLOCK_4X4, aom_obmc_sad4x4, aom_obmc_variance4x4,
+ aom_obmc_sub_pixel_variance4x4)
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
@@ -2401,58 +2401,58 @@
cpi->fn_ptr[BT].msvf = MSVF;
#if CONFIG_EXT_PARTITION
- MBFP(BLOCK_128X128, vpx_masked_sad128x128, vpx_masked_variance128x128,
- vpx_masked_sub_pixel_variance128x128)
- MBFP(BLOCK_128X64, vpx_masked_sad128x64, vpx_masked_variance128x64,
- vpx_masked_sub_pixel_variance128x64)
- MBFP(BLOCK_64X128, vpx_masked_sad64x128, vpx_masked_variance64x128,
- vpx_masked_sub_pixel_variance64x128)
+ MBFP(BLOCK_128X128, aom_masked_sad128x128, aom_masked_variance128x128,
+ aom_masked_sub_pixel_variance128x128)
+ MBFP(BLOCK_128X64, aom_masked_sad128x64, aom_masked_variance128x64,
+ aom_masked_sub_pixel_variance128x64)
+ MBFP(BLOCK_64X128, aom_masked_sad64x128, aom_masked_variance64x128,
+ aom_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- MBFP(BLOCK_64X64, vpx_masked_sad64x64, vpx_masked_variance64x64,
- vpx_masked_sub_pixel_variance64x64)
- MBFP(BLOCK_64X32, vpx_masked_sad64x32, vpx_masked_variance64x32,
- vpx_masked_sub_pixel_variance64x32)
- MBFP(BLOCK_32X64, vpx_masked_sad32x64, vpx_masked_variance32x64,
- vpx_masked_sub_pixel_variance32x64)
- MBFP(BLOCK_32X32, vpx_masked_sad32x32, vpx_masked_variance32x32,
- vpx_masked_sub_pixel_variance32x32)
- MBFP(BLOCK_32X16, vpx_masked_sad32x16, vpx_masked_variance32x16,
- vpx_masked_sub_pixel_variance32x16)
- MBFP(BLOCK_16X32, vpx_masked_sad16x32, vpx_masked_variance16x32,
- vpx_masked_sub_pixel_variance16x32)
- MBFP(BLOCK_16X16, vpx_masked_sad16x16, vpx_masked_variance16x16,
- vpx_masked_sub_pixel_variance16x16)
- MBFP(BLOCK_16X8, vpx_masked_sad16x8, vpx_masked_variance16x8,
- vpx_masked_sub_pixel_variance16x8)
- MBFP(BLOCK_8X16, vpx_masked_sad8x16, vpx_masked_variance8x16,
- vpx_masked_sub_pixel_variance8x16)
- MBFP(BLOCK_8X8, vpx_masked_sad8x8, vpx_masked_variance8x8,
- vpx_masked_sub_pixel_variance8x8)
- MBFP(BLOCK_4X8, vpx_masked_sad4x8, vpx_masked_variance4x8,
- vpx_masked_sub_pixel_variance4x8)
- MBFP(BLOCK_8X4, vpx_masked_sad8x4, vpx_masked_variance8x4,
- vpx_masked_sub_pixel_variance8x4)
- MBFP(BLOCK_4X4, vpx_masked_sad4x4, vpx_masked_variance4x4,
- vpx_masked_sub_pixel_variance4x4)
+ MBFP(BLOCK_64X64, aom_masked_sad64x64, aom_masked_variance64x64,
+ aom_masked_sub_pixel_variance64x64)
+ MBFP(BLOCK_64X32, aom_masked_sad64x32, aom_masked_variance64x32,
+ aom_masked_sub_pixel_variance64x32)
+ MBFP(BLOCK_32X64, aom_masked_sad32x64, aom_masked_variance32x64,
+ aom_masked_sub_pixel_variance32x64)
+ MBFP(BLOCK_32X32, aom_masked_sad32x32, aom_masked_variance32x32,
+ aom_masked_sub_pixel_variance32x32)
+ MBFP(BLOCK_32X16, aom_masked_sad32x16, aom_masked_variance32x16,
+ aom_masked_sub_pixel_variance32x16)
+ MBFP(BLOCK_16X32, aom_masked_sad16x32, aom_masked_variance16x32,
+ aom_masked_sub_pixel_variance16x32)
+ MBFP(BLOCK_16X16, aom_masked_sad16x16, aom_masked_variance16x16,
+ aom_masked_sub_pixel_variance16x16)
+ MBFP(BLOCK_16X8, aom_masked_sad16x8, aom_masked_variance16x8,
+ aom_masked_sub_pixel_variance16x8)
+ MBFP(BLOCK_8X16, aom_masked_sad8x16, aom_masked_variance8x16,
+ aom_masked_sub_pixel_variance8x16)
+ MBFP(BLOCK_8X8, aom_masked_sad8x8, aom_masked_variance8x8,
+ aom_masked_sub_pixel_variance8x8)
+ MBFP(BLOCK_4X8, aom_masked_sad4x8, aom_masked_variance4x8,
+ aom_masked_sub_pixel_variance4x8)
+ MBFP(BLOCK_8X4, aom_masked_sad8x4, aom_masked_variance8x4,
+ aom_masked_sub_pixel_variance8x4)
+ MBFP(BLOCK_4X4, aom_masked_sad4x4, aom_masked_variance4x4,
+ aom_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
- /* vp10_init_quantizer() is first called here. Add check in
- * vp10_frame_init_quantizer() so that vp10_init_quantizer is only
+ /* av1_init_quantizer() is first called here. Add check in
+ * av1_frame_init_quantizer() so that av1_init_quantizer is only
* called later when needed. This will avoid unnecessary calls to
- * vp10_init_quantizer() for every frame.
+ * av1_init_quantizer() for every frame.
*/
- vp10_init_quantizer(cpi);
+ av1_init_quantizer(cpi);
#if CONFIG_AOM_QM
aom_qm_init(cm);
#endif
- vp10_loop_filter_init(cm);
+ av1_loop_filter_init(cm);
#if CONFIG_LOOP_RESTORATION
- vp10_loop_restoration_precal();
+ av1_loop_restoration_precal();
#endif // CONFIG_LOOP_RESTORATION
cm->error.setjmp = 0;
@@ -2465,8 +2465,8 @@
#define SNPRINT2(H, T, V) \
snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
-void vp10_remove_compressor(VP10_COMP *cpi) {
- VP10_COMMON *cm;
+void av1_remove_compressor(AV1_COMP *cpi) {
+ AV1_COMMON *cm;
unsigned int i;
int t;
@@ -2475,7 +2475,7 @@
cm = &cpi->common;
if (cm->current_video_frame > 0) {
#if CONFIG_INTERNAL_STATS
- vpx_clear_system_state();
+ aom_clear_system_state();
if (cpi->oxcf.pass != 1) {
char headings[512] = { 0 };
@@ -2493,13 +2493,13 @@
const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
if (cpi->b_calculate_psnr) {
- const double total_psnr = vpx_sse_to_psnr(
+ const double total_psnr = aom_sse_to_psnr(
(double)cpi->total_samples, peak, (double)cpi->total_sq_error);
const double total_ssim =
100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
snprintf(headings, sizeof(headings),
"Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
- "VPXSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
+ "AOMSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
"WstPsnr\tWstSsim\tWstFast\tWstHVS");
snprintf(results, sizeof(results),
"%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
@@ -2519,7 +2519,7 @@
if (cpi->b_calculate_consistency) {
double consistency =
- vpx_sse_to_psnr((double)cpi->total_samples, peak,
+ aom_sse_to_psnr((double)cpi->total_samples, peak,
(double)cpi->total_inconsistency);
SNPRINT(headings, "\tConsist\tWstCons");
@@ -2550,44 +2550,44 @@
}
for (t = 0; t < cpi->num_workers; ++t) {
- VPxWorker *const worker = &cpi->workers[t];
+ AVxWorker *const worker = &cpi->workers[t];
EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
// Deallocate allocated threads.
- vpx_get_worker_interface()->end(worker);
+ aom_get_worker_interface()->end(worker);
// Deallocate allocated thread data.
if (t < cpi->num_workers - 1) {
if (cpi->common.allow_screen_content_tools)
- vpx_free(thread_data->td->mb.palette_buffer);
- vpx_free(thread_data->td->counts);
- vp10_free_pc_tree(thread_data->td);
- vp10_free_var_tree(thread_data->td);
- vpx_free(thread_data->td);
+ aom_free(thread_data->td->mb.palette_buffer);
+ aom_free(thread_data->td->counts);
+ av1_free_pc_tree(thread_data->td);
+ av1_free_var_tree(thread_data->td);
+ aom_free(thread_data->td);
}
}
- vpx_free(cpi->tile_thr_data);
- vpx_free(cpi->workers);
+ aom_free(cpi->tile_thr_data);
+ aom_free(cpi->workers);
- if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+ if (cpi->num_workers > 1) av1_loop_filter_dealloc(&cpi->lf_row_sync);
dealloc_compressor_data(cpi);
for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
++i) {
- vpx_free(cpi->mbgraph_stats[i].mb_stats);
+ aom_free(cpi->mbgraph_stats[i].mb_stats);
}
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
- vpx_free(cpi->twopass.frame_mb_stats_buf);
+ aom_free(cpi->twopass.frame_mb_stats_buf);
cpi->twopass.frame_mb_stats_buf = NULL;
}
#endif
- vp10_remove_common(cm);
- vp10_free_ref_frame_buffers(cm->buffer_pool);
- vpx_free(cpi);
+ av1_remove_common(cm);
+ av1_free_ref_frame_buffers(cm->buffer_pool);
+ aom_free(cpi);
#ifdef OUTPUT_YUV_SKINMAP
fclose(yuv_skinmap_file);
@@ -2610,15 +2610,15 @@
#endif
}
-static void generate_psnr_packet(VP10_COMP *cpi) {
- struct vpx_codec_cx_pkt pkt;
+static void generate_psnr_packet(AV1_COMP *cpi) {
+ struct aom_codec_cx_pkt pkt;
int i;
PSNR_STATS psnr;
-#if CONFIG_VP9_HIGHBITDEPTH
- vpx_calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
+#if CONFIG_AOM_HIGHBITDEPTH
+ aom_calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
#else
- vpx_calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr);
+ aom_calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr);
#endif
for (i = 0; i < 4; ++i) {
@@ -2626,69 +2626,69 @@
pkt.data.psnr.sse[i] = psnr.sse[i];
pkt.data.psnr.psnr[i] = psnr.psnr[i];
}
- pkt.kind = VPX_CODEC_PSNR_PKT;
- vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+ pkt.kind = AOM_CODEC_PSNR_PKT;
+ aom_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags) {
if (ref_frame_flags > ((1 << INTER_REFS_PER_FRAME) - 1)) return -1;
cpi->ref_frame_flags = ref_frame_flags;
return 0;
}
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) {
- cpi->ext_refresh_golden_frame = (ref_frame_flags & VPX_GOLD_FLAG) != 0;
- cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VPX_ALT_FLAG) != 0;
- cpi->ext_refresh_last_frame = (ref_frame_flags & VPX_LAST_FLAG) != 0;
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags) {
+ cpi->ext_refresh_golden_frame = (ref_frame_flags & AOM_GOLD_FLAG) != 0;
+ cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & AOM_ALT_FLAG) != 0;
+ cpi->ext_refresh_last_frame = (ref_frame_flags & AOM_LAST_FLAG) != 0;
cpi->ext_refresh_frame_flags_pending = 1;
}
-static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
- VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_av1_ref_frame_buffer(
+ AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag) {
MV_REFERENCE_FRAME ref_frame = NONE;
- if (ref_frame_flag == VPX_LAST_FLAG) ref_frame = LAST_FRAME;
+ if (ref_frame_flag == AOM_LAST_FLAG) ref_frame = LAST_FRAME;
#if CONFIG_EXT_REFS
- else if (ref_frame_flag == VPX_LAST2_FLAG)
+ else if (ref_frame_flag == AOM_LAST2_FLAG)
ref_frame = LAST2_FRAME;
- else if (ref_frame_flag == VPX_LAST3_FLAG)
+ else if (ref_frame_flag == AOM_LAST3_FLAG)
ref_frame = LAST3_FRAME;
#endif // CONFIG_EXT_REFS
- else if (ref_frame_flag == VPX_GOLD_FLAG)
+ else if (ref_frame_flag == AOM_GOLD_FLAG)
ref_frame = GOLDEN_FRAME;
#if CONFIG_EXT_REFS
- else if (ref_frame_flag == VPX_BWD_FLAG)
+ else if (ref_frame_flag == AOM_BWD_FLAG)
ref_frame = BWDREF_FRAME;
#endif // CONFIG_EXT_REFS
- else if (ref_frame_flag == VPX_ALT_FLAG)
+ else if (ref_frame_flag == AOM_ALT_FLAG)
ref_frame = ALTREF_FRAME;
return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
}
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
- YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
- if (cfg) {
- vpx_yv12_copy_frame(cfg, sd);
- return 0;
- } else {
- return -1;
- }
-}
-
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
- YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+ YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
if (cfg) {
- vpx_yv12_copy_frame(sd, cfg);
+ aom_yv12_copy_frame(cfg, sd);
return 0;
} else {
return -1;
}
}
-int vp10_update_entropy(VP10_COMP *cpi, int update) {
+int av1_set_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
+ if (cfg) {
+ aom_yv12_copy_frame(sd, cfg);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int av1_update_entropy(AV1_COMP *cpi, int update) {
cpi->ext_refresh_frame_context = update;
cpi->ext_refresh_frame_context_pending = 1;
return 0;
@@ -2699,7 +2699,7 @@
// as YUV 420. We simply use the top-left pixels of the UV buffers, since we do
// not denoise the UV channels at this time. If we ever implement UV channel
// denoising, we will have to modify this.
-void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
+void aom_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
uint8_t *src = s->y_buffer;
int h = s->y_height;
@@ -2727,9 +2727,9 @@
#endif
#if CONFIG_EXT_REFS
-static void check_show_existing_frame(VP10_COMP *cpi) {
+static void check_show_existing_frame(AV1_COMP *cpi) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const FRAME_UPDATE_TYPE next_frame_update_type =
gf_group->update_type[gf_group->index];
const int which_arf = gf_group->arf_update_idx[gf_group->index];
@@ -2744,7 +2744,7 @@
(next_frame_update_type == OVERLAY_UPDATE ||
next_frame_update_type == INTNL_OVERLAY_UPDATE)) {
// Other parameters related to OVERLAY_UPDATE will be taken care of
- // in vp10_rc_get_second_pass_params(cpi)
+ // in av1_rc_get_second_pass_params(cpi)
cm->show_existing_frame = 1;
cpi->rc.is_src_frame_alt_ref = 1;
cpi->existing_fb_idx_to_show = cpi->alt_fb_idx;
@@ -2757,11 +2757,11 @@
#endif // CONFIG_EXT_REFS
#ifdef OUTPUT_YUV_REC
-void vp10_write_one_yuv_frame(VP10_COMMON *cm, YV12_BUFFER_CONFIG *s) {
+void aom_write_one_yuv_frame(AV1_COMMON *cm, YV12_BUFFER_CONFIG *s) {
uint8_t *src = s->y_buffer;
int h = cm->height;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
@@ -2789,7 +2789,7 @@
fflush(yuv_rec_file);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
do {
fwrite(src, s->y_width, 1, yuv_rec_file);
@@ -2816,15 +2816,15 @@
}
#endif // OUTPUT_YUV_REC
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
int bd) {
#else
static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst) {
-#endif // CONFIG_VP9_HIGHBITDEPTH
- // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ // TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t
int i;
const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
src->v_buffer };
@@ -2841,31 +2841,31 @@
dst->uv_crop_height };
for (i = 0; i < MAX_MB_PLANE; ++i) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
- src_strides[i], dsts[i], dst_heights[i],
- dst_widths[i], dst_strides[i], bd);
+ av1_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
+ src_strides[i], dsts[i], dst_heights[i],
+ dst_widths[i], dst_strides[i], bd);
} else {
- vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
- dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+ av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+ dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
}
#else
- vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
- dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+ dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
- vpx_extend_frame_borders(dst);
+ aom_extend_frame_borders(dst);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int planes,
int bd) {
#else
static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int planes) {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
const int src_w = src->y_crop_width;
const int src_h = src->y_crop_height;
const int dst_w = dst->y_crop_width;
@@ -2876,7 +2876,7 @@
uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
const InterpFilterParams interp_filter_params =
- vp10_get_interp_filter_params(EIGHTTAP_REGULAR);
+ av1_get_interp_filter_params(EIGHTTAP_REGULAR);
const int16_t *kernel = interp_filter_params.filter_ptr;
const int taps = interp_filter_params.taps;
int x, y, i;
@@ -2894,35 +2894,35 @@
(x / factor) * src_w / dst_w;
uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
+ aom_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
&kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
&kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
16 / factor, 16 / factor, bd);
} else {
- vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
+ aom_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
&kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
&kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
16 / factor, 16 / factor);
}
#else
- vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
+ aom_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
&kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
&kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
16 / factor, 16 / factor);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
if (planes == 1)
- vpx_extend_frame_borders_y(dst);
+ aom_extend_frame_borders_y(dst);
else
- vpx_extend_frame_borders(dst);
+ aom_extend_frame_borders(dst);
}
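// A standalone sketch of the 1/16-pel addressing used by the scaler
// above: destination positions advance in fixed-point steps of
// 16 * src / dst, the low 4 bits select the filter phase, and the high
// bits select the source sample. The dimensions are illustrative.
#include <stdio.h>

int main(void) {
  const int src_w = 640, dst_w = 480;
  const int step_q4 = 16 * src_w / dst_w; /* as passed to the convolve calls */
  int x;
  for (x = 0; x < 4; ++x) {
    const int x_q4 = x * step_q4;
    printf("dst x=%d -> src sample %d, phase %d/16\n", x, x_q4 >> 4,
           x_q4 & 0xf);
  }
  return 0;
}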
-static int scale_down(VP10_COMP *cpi, int q) {
+static int scale_down(AV1_COMP *cpi, int q) {
RATE_CONTROL *const rc = &cpi->rc;
GF_GROUP *const gf_group = &cpi->twopass.gf_group;
int scale = 0;
@@ -2932,7 +2932,7 @@
q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
const int max_size_thresh =
(int)(rate_thresh_mult[SCALE_STEP1] *
- VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
+ AOMMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
}
return scale;
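// A toy version of the scale_down() trigger above: request a downscale
// when the projected frame size exceeds a multiple of the larger of the
// frame target and the average bandwidth. The 1.5 multiplier below is
// illustrative, standing in for rate_thresh_mult[SCALE_STEP1].
#include <stdio.h>

#define MAXV(a, b) ((a) > (b) ? (a) : (b)) /* stand-in for AOMMAX */

static int should_scale_down(int projected, int target, int avg_bw,
                             double mult) {
  const int max_size_thresh = (int)(mult * MAXV(target, avg_bw));
  return projected > max_size_thresh;
}

int main(void) {
  printf("%d\n", should_scale_down(90000, 40000, 50000, 1.5)); /* 1 */
  printf("%d\n", should_scale_down(60000, 40000, 50000, 1.5)); /* 0 */
  return 0;
}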
@@ -2940,10 +2940,10 @@
// Function to test for conditions that indicate we should loop
// back and recode a frame.
-static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
- int q, int maxq, int minq) {
+static int recode_loop_test(AV1_COMP *cpi, int high_limit, int low_limit, int q,
+ int maxq, int minq) {
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
int force_recode = 0;
@@ -2961,7 +2961,7 @@
if ((rc->projected_frame_size > high_limit && q < maxq) ||
(rc->projected_frame_size < low_limit && q > minq)) {
force_recode = 1;
- } else if (cpi->oxcf.rc_mode == VPX_CQ) {
+ } else if (cpi->oxcf.rc_mode == AOM_CQ) {
// Deal with frame undershoot and whether or not we are
// below the automatically set cq level.
if (q > oxcf->cq_level &&
@@ -2985,9 +2985,9 @@
}
// Up-sample 1 reference frame.
-static INLINE int upsample_ref_frame(VP10_COMP *cpi,
+static INLINE int upsample_ref_frame(AV1_COMP *cpi,
const YV12_BUFFER_CONFIG *const ref) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
EncRefCntBuffer *ubufs = cpi->upsampled_ref_bufs;
int new_uidx = get_free_upsampled_ref_buf(ubufs);
@@ -2998,19 +2998,19 @@
// Can allocate buffer for Y plane only.
if (upsampled_ref->buffer_alloc_sz < (ref->buffer_alloc_sz << 6))
- if (vpx_realloc_frame_buffer(upsampled_ref, (cm->width << 3),
+ if (aom_realloc_frame_buffer(upsampled_ref, (cm->width << 3),
(cm->height << 3), cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- (VPX_ENC_BORDER_IN_PIXELS << 3),
+ (AOM_ENC_BORDER_IN_PIXELS << 3),
cm->byte_alignment, NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate up-sampled frame buffer");
  // Currently, only the Y plane is up-sampled; U and V are not used.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
scale_and_extend_frame(ref, upsampled_ref, 1, (int)cm->bit_depth);
#else
scale_and_extend_frame(ref, upsampled_ref, 1);
@@ -3022,7 +3022,7 @@
#define DUMP_REF_FRAME_IMAGES 0
#if DUMP_REF_FRAME_IMAGES == 1
-static int dump_one_image(VP10_COMMON *cm,
+static int dump_one_image(AV1_COMMON *cm,
const YV12_BUFFER_CONFIG *const ref_buf,
char *file_name) {
int h;
@@ -3030,12 +3030,12 @@
if (ref_buf == NULL) {
printf("Frame data buffer is NULL.\n");
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
if ((f_ref = fopen(file_name, "wb")) == NULL) {
printf("Unable to open file %s to write.\n", file_name);
- return VPX_CODEC_MEM_ERROR;
+ return AOM_CODEC_MEM_ERROR;
}
// --- Y ---
@@ -3055,11 +3055,11 @@
fclose(f_ref);
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
-static void dump_ref_frame_images(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void dump_ref_frame_images(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MV_REFERENCE_FRAME ref_frame;
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
@@ -3076,7 +3076,7 @@
// as follows:
// LAST_FRAME -> LAST2_FRAME -> LAST3_FRAME
// when the LAST_FRAME is updated.
-static INLINE void shift_last_ref_frames(VP10_COMP *cpi) {
+static INLINE void shift_last_ref_frames(AV1_COMP *cpi) {
int ref_frame;
for (ref_frame = LAST_REF_FRAMES - 1; ref_frame > 0; --ref_frame) {
cpi->lst_fb_idxes[ref_frame] = cpi->lst_fb_idxes[ref_frame - 1];
@@ -3092,8 +3092,8 @@
}
#endif
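// A minimal sketch of the index rotation in shift_last_ref_frames()
// above: entries move LAST -> LAST2 -> LAST3 by copying each slot from
// its predecessor, freeing slot 0 for the newly coded frame. The buffer
// indices are illustrative.
#include <stdio.h>

int main(void) {
  int idx[3] = { 10, 11, 12 }; /* LAST, LAST2, LAST3 buffer indices */
  int i;
  for (i = 3 - 1; i > 0; --i) idx[i] = idx[i - 1];
  idx[0] = 9; /* the newly updated LAST buffer */
  printf("%d %d %d\n", idx[0], idx[1], idx[2]); /* 9 10 11 */
  return 0;
}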
-void vp10_update_reference_frames(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_update_reference_frames(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
const int use_upsampled_ref = cpi->sf.use_upsampled_references;
int new_uidx = 0;
@@ -3142,10 +3142,10 @@
uref_cnt_fb(cpi->upsampled_ref_bufs,
&cpi->upsampled_ref_idx[cpi->alt_fb_idx], new_uidx);
}
- } else if (vp10_preserve_existing_gf(cpi)) {
+ } else if (av1_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term in function
- // vp10_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
+ // av1_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
// we're updating the GF with the current decoded frame, we save it to the
// ARF slot instead.
// We now have to update the ARF with the current frame and swap gld_fb_idx
@@ -3385,38 +3385,38 @@
#endif // DUMP_REF_FRAME_IMAGES
}
-static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
+static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) {
MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
struct loopfilter *lf = &cm->lf;
if (is_lossless_requested(&cpi->oxcf)) {
lf->filter_level = 0;
} else {
- struct vpx_usec_timer timer;
+ struct aom_usec_timer timer;
- vpx_clear_system_state();
+ aom_clear_system_state();
- vpx_usec_timer_start(&timer);
+ aom_usec_timer_start(&timer);
#if CONFIG_LOOP_RESTORATION
- vp10_pick_filter_restoration(cpi->Source, cpi, cpi->sf.lpf_pick);
+ av1_pick_filter_restoration(cpi->Source, cpi, cpi->sf.lpf_pick);
#else
- vp10_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
+ av1_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
#endif // CONFIG_LOOP_RESTORATION
- vpx_usec_timer_mark(&timer);
- cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+ aom_usec_timer_mark(&timer);
+ cpi->time_pick_lpf += aom_usec_timer_elapsed(&timer);
}
if (lf->filter_level > 0) {
#if CONFIG_VAR_TX || CONFIG_EXT_PARTITION
- vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+ av1_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
#else
if (cpi->num_workers > 1)
- vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
- lf->filter_level, 0, 0, cpi->workers,
- cpi->num_workers, &cpi->lf_row_sync);
+ av1_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
+ lf->filter_level, 0, 0, cpi->workers,
+ cpi->num_workers, &cpi->lf_row_sync);
else
- vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+ av1_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
#endif
}
#if CONFIG_DERING
@@ -3424,8 +3424,8 @@
cm->dering_level = 0;
} else {
cm->dering_level =
- vp10_dering_search(cm->frame_to_show, cpi->Source, cm, xd);
- vp10_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
+ av1_dering_search(cm->frame_to_show, cpi->Source, cm, xd);
+ av1_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
}
#endif // CONFIG_DERING
@@ -3438,31 +3438,31 @@
// TODO(yaowu): investigate per-segment CLPF decision and
// an optimal threshold, use 80 for now.
for (i = 0; i < MAX_SEGMENTS; i++)
- hq &= vp10_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
+ hq &= av1_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
    if (!hq) {  // Don't try the filter if the entire image is nearly losslessly
// encoded
#if CLPF_FILTER_ALL_PLANES
- vpx_yv12_copy_frame(cm->frame_to_show, &cpi->last_frame_uf);
- before = vpx_get_y_sse(cpi->Source, cm->frame_to_show) +
- vpx_get_u_sse(cpi->Source, cm->frame_to_show) +
- vpx_get_v_sse(cpi->Source, cm->frame_to_show);
- vp10_clpf_frame(cm->frame_to_show, cm, xd);
- after = vpx_get_y_sse(cpi->Source, cm->frame_to_show) +
- vpx_get_u_sse(cpi->Source, cm->frame_to_show) +
- vpx_get_v_sse(cpi->Source, cm->frame_to_show);
+ aom_yv12_copy_frame(cm->frame_to_show, &cpi->last_frame_uf);
+ before = aom_get_y_sse(cpi->Source, cm->frame_to_show) +
+ aom_get_u_sse(cpi->Source, cm->frame_to_show) +
+ aom_get_v_sse(cpi->Source, cm->frame_to_show);
+ av1_clpf_frame(cm->frame_to_show, cm, xd);
+ after = aom_get_y_sse(cpi->Source, cm->frame_to_show) +
+ aom_get_u_sse(cpi->Source, cm->frame_to_show) +
+ aom_get_v_sse(cpi->Source, cm->frame_to_show);
#else
- vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
- before = vpx_get_y_sse(cpi->Source, cm->frame_to_show);
- vp10_clpf_frame(cm->frame_to_show, cm, xd);
- after = vpx_get_y_sse(cpi->Source, cm->frame_to_show);
+ aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+ before = aom_get_y_sse(cpi->Source, cm->frame_to_show);
+ av1_clpf_frame(cm->frame_to_show, cm, xd);
+ after = aom_get_y_sse(cpi->Source, cm->frame_to_show);
#endif
if (before < after) {
// No improvement, restore original
#if CLPF_FILTER_ALL_PLANES
- vpx_yv12_copy_frame(&cpi->last_frame_uf, cm->frame_to_show);
+ aom_yv12_copy_frame(&cpi->last_frame_uf, cm->frame_to_show);
#else
- vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+ aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
#endif
} else {
cm->clpf = 1;
@@ -3472,47 +3472,47 @@
#endif
#if CONFIG_LOOP_RESTORATION
if (cm->rst_info.restoration_type != RESTORE_NONE) {
- vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
- cm->frame_type == KEY_FRAME, cm->width,
- cm->height);
- vp10_loop_restoration_rows(cm->frame_to_show, cm, 0, cm->mi_rows, 0);
+ av1_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
+ cm->frame_type == KEY_FRAME, cm->width,
+ cm->height);
+ av1_loop_restoration_rows(cm->frame_to_show, cm, 0, cm->mi_rows, 0);
}
#endif // CONFIG_LOOP_RESTORATION
- vpx_extend_frame_inner_borders(cm->frame_to_show);
+ aom_extend_frame_inner_borders(cm->frame_to_show);
}
-static INLINE void alloc_frame_mvs(VP10_COMMON *const cm, int buffer_idx) {
+static INLINE void alloc_frame_mvs(AV1_COMMON *const cm, int buffer_idx) {
RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
new_fb_ptr->mi_cols < cm->mi_cols) {
- vpx_free(new_fb_ptr->mvs);
+ aom_free(new_fb_ptr->mvs);
CHECK_MEM_ERROR(cm, new_fb_ptr->mvs,
- (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+ (MV_REF *)aom_calloc(cm->mi_rows * cm->mi_cols,
sizeof(*new_fb_ptr->mvs)));
new_fb_ptr->mi_rows = cm->mi_rows;
new_fb_ptr->mi_cols = cm->mi_cols;
}
}
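// A standalone sketch of the grow-only allocation pattern in
// alloc_frame_mvs() above: the buffer is (re)allocated only when it is
// missing or smaller than the requested grid, and is never shrunk. The
// Grid type is illustrative.
#include <stdlib.h>

typedef struct {
  int *data;
  int rows, cols;
} Grid;

static int grid_ensure(Grid *g, int rows, int cols) {
  if (g->data == NULL || g->rows < rows || g->cols < cols) {
    free(g->data);
    g->data = (int *)calloc((size_t)rows * cols, sizeof(*g->data));
    if (g->data == NULL) return -1;
    g->rows = rows;
    g->cols = cols;
  }
  return 0;
}

int main(void) {
  Grid g = { NULL, 0, 0 };
  if (grid_ensure(&g, 8, 8)) return 1; /* allocates an 8x8 grid */
  if (grid_ensure(&g, 4, 4)) return 1; /* keeps the larger buffer */
  free(g.data);
  return 0;
}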
-void vp10_scale_references(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+void av1_scale_references(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
MV_REFERENCE_FRAME ref_frame;
- const VPX_REFFRAME ref_mask[INTER_REFS_PER_FRAME] = {
- VPX_LAST_FLAG,
+ const AOM_REFFRAME ref_mask[INTER_REFS_PER_FRAME] = {
+ AOM_LAST_FLAG,
#if CONFIG_EXT_REFS
- VPX_LAST2_FLAG,
- VPX_LAST3_FLAG,
+ AOM_LAST2_FLAG,
+ AOM_LAST3_FLAG,
#endif // CONFIG_EXT_REFS
- VPX_GOLD_FLAG,
+ AOM_GOLD_FLAG,
#if CONFIG_EXT_REFS
- VPX_BWD_FLAG,
+ AOM_BWD_FLAG,
#endif // CONFIG_EXT_REFS
- VPX_ALT_FLAG
+ AOM_ALT_FLAG
};
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
- // Need to convert from VPX_REFFRAME to index into ref_mask (subtract 1).
+ // Need to convert from AOM_REFFRAME to index into ref_mask (subtract 1).
if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
BufferPool *const pool = cm->buffer_pool;
const YV12_BUFFER_CONFIG *const ref =
@@ -3523,7 +3523,7 @@
continue;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
RefCntBuffer *new_fb_ptr = NULL;
int force_scaling = 0;
@@ -3536,12 +3536,12 @@
new_fb_ptr = &pool->frame_bufs[new_fb];
if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
new_fb_ptr->buf.y_crop_height != cm->height) {
- if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
+ if (aom_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
cm->use_highbitdepth,
- VPX_ENC_BORDER_IN_PIXELS,
+ AOM_ENC_BORDER_IN_PIXELS,
cm->byte_alignment, NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
scale_and_extend_frame(ref, &new_fb_ptr->buf, MAX_MB_PLANE,
(int)cm->bit_depth);
@@ -3561,17 +3561,17 @@
new_fb_ptr = &pool->frame_bufs[new_fb];
if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
new_fb_ptr->buf.y_crop_height != cm->height) {
- if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
+ if (aom_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
- VPX_ENC_BORDER_IN_PIXELS,
+ AOM_ENC_BORDER_IN_PIXELS,
cm->byte_alignment, NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
scale_and_extend_frame(ref, &new_fb_ptr->buf, MAX_MB_PLANE);
cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
alloc_frame_mvs(cm, new_fb);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (cpi->sf.use_upsampled_references &&
(force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
@@ -3580,17 +3580,17 @@
EncRefCntBuffer *ubuf =
&cpi->upsampled_ref_bufs[cpi->upsampled_ref_idx[map_idx]];
- if (vpx_realloc_frame_buffer(&ubuf->buf, (cm->width << 3),
+ if (aom_realloc_frame_buffer(&ubuf->buf, (cm->width << 3),
(cm->height << 3), cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- (VPX_ENC_BORDER_IN_PIXELS << 3),
+ (AOM_ENC_BORDER_IN_PIXELS << 3),
cm->byte_alignment, NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate up-sampled frame buffer");
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
scale_and_extend_frame(&new_fb_ptr->buf, &ubuf->buf, 1,
(int)cm->bit_depth);
#else
@@ -3611,8 +3611,8 @@
}
}
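// A minimal sketch of the "subtract 1" indexing noted in
// av1_scale_references() above: reference-frame enums start at 1 (0 is
// the NONE/intra value), so ref_frame - 1 indexes a zero-based table of
// per-reference flags. The enum and flag values are illustrative.
#include <stdio.h>

enum { LAST = 1, GOLDEN = 2, ALTREF = 3, NUM_REFS = 3 };

int main(void) {
  const int ref_mask[NUM_REFS] = { 1 << 0, 1 << 1, 1 << 2 };
  const int ref_frame_flags = (1 << 0) | (1 << 2); /* LAST and ALTREF */
  int ref;
  for (ref = LAST; ref <= ALTREF; ++ref) {
    if (ref_frame_flags & ref_mask[ref - 1])
      printf("reference %d is active\n", ref);
  }
  return 0;
}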
-static void release_scaled_references(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+static void release_scaled_references(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
int i;
if (cpi->oxcf.pass == 0) {
// Only release scaled references under certain conditions:
@@ -3664,8 +3664,8 @@
model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
}
-void vp10_full_to_model_counts(vp10_coeff_count_model *model_count,
- vp10_coeff_count *full_count) {
+void av1_full_to_model_counts(av1_coeff_count_model *model_count,
+ av1_coeff_count *full_count) {
int i, j, k, l;
for (i = 0; i < PLANE_TYPES; ++i)
@@ -3676,14 +3676,14 @@
}
#if 0 && CONFIG_INTERNAL_STATS
-static void output_frame_level_debug_stats(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void output_frame_level_debug_stats(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
int64_t recon_err;
- vpx_clear_system_state();
+ aom_clear_system_state();
- recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ recon_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
if (cpi->twopass.total_left_stats.coded_error != 0.0)
fprintf(f, "%10u %dx%d %10d %10d %d %d %10d %10d %10d %10d"
@@ -3710,12 +3710,12 @@
cpi->rc.total_target_vs_actual,
(cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
cpi->rc.total_actual_bits, cm->base_qindex,
- vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
- (double)vp10_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
- vp10_convert_qindex_to_q(cpi->twopass.active_worst_quality,
+ av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
+ (double)av1_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
+ av1_convert_qindex_to_q(cpi->twopass.active_worst_quality,
cm->bit_depth),
cpi->rc.avg_q,
- vp10_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
+ av1_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
cpi->refresh_last_frame, cpi->refresh_golden_frame,
cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
cpi->twopass.bits_left,
@@ -3746,12 +3746,12 @@
}
#endif
-static void set_mv_search_params(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
- const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
+static void set_mv_search_params(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
+ const unsigned int max_mv_def = AOMMIN(cm->width, cm->height);
// Default based on max resolution.
- cpi->mv_step_param = vp10_init_search_range(max_mv_def);
+ cpi->mv_step_param = av1_init_search_range(max_mv_def);
if (cpi->sf.mv.auto_mv_step_size) {
if (frame_is_intra_only(cm)) {
@@ -3763,34 +3763,34 @@
// Allow mv_steps to correspond to twice the max mv magnitude found
// in the previous frame, capped by the default max_mv_magnitude based
// on resolution.
- cpi->mv_step_param = vp10_init_search_range(
- VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
+ cpi->mv_step_param = av1_init_search_range(
+ AOMMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
}
cpi->max_mv_magnitude = 0;
}
}
}
-static void set_size_independent_vars(VP10_COMP *cpi) {
- vp10_set_speed_features_framesize_independent(cpi);
- vp10_set_rd_speed_thresholds(cpi);
- vp10_set_rd_speed_thresholds_sub8x8(cpi);
+static void set_size_independent_vars(AV1_COMP *cpi) {
+ av1_set_speed_features_framesize_independent(cpi);
+ av1_set_rd_speed_thresholds(cpi);
+ av1_set_rd_speed_thresholds_sub8x8(cpi);
cpi->common.interp_filter = cpi->sf.default_interp_filter;
}
-static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+static void set_size_dependent_vars(AV1_COMP *cpi, int *q, int *bottom_index,
int *top_index) {
- VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
// Setup variables that depend on the dimensions of the frame.
- vp10_set_speed_features_framesize_dependent(cpi);
+ av1_set_speed_features_framesize_dependent(cpi);
// Decide q and q bounds.
- *q = vp10_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+ *q = av1_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
if (!frame_is_intra_only(cm)) {
- vp10_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
+ av1_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
}
// Configure experimental use of segmentation for enhanced coding of
@@ -3801,34 +3801,34 @@
configure_static_seg_features(cpi);
}
-static void init_motion_estimation(VP10_COMP *cpi) {
+static void init_motion_estimation(AV1_COMP *cpi) {
int y_stride = cpi->scaled_source.y_stride;
if (cpi->sf.mv.search_method == NSTEP) {
- vp10_init3smotion_compensation(&cpi->ss_cfg, y_stride);
+ av1_init3smotion_compensation(&cpi->ss_cfg, y_stride);
} else if (cpi->sf.mv.search_method == DIAMOND) {
- vp10_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
+ av1_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
}
}
-static void set_frame_size(VP10_COMP *cpi) {
+static void set_frame_size(AV1_COMP *cpi) {
int ref_frame;
- VP10_COMMON *const cm = &cpi->common;
- VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
+ AV1EncoderConfig *const oxcf = &cpi->oxcf;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
+ if (oxcf->pass == 2 && oxcf->rc_mode == AOM_VBR &&
((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
(oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
- vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
- &oxcf->scaled_frame_height);
+ av1_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+ &oxcf->scaled_frame_height);
// There has been a change in frame size.
- vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
- oxcf->scaled_frame_height);
+ av1_set_size_literal(cpi, oxcf->scaled_frame_width,
+ oxcf->scaled_frame_height);
}
- if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
+ if (oxcf->pass == 0 && oxcf->rc_mode == AOM_CBR &&
oxcf->resize_mode == RESIZE_DYNAMIC) {
if (cpi->resize_pending == 1) {
oxcf->scaled_frame_width =
@@ -3842,8 +3842,8 @@
}
if (cpi->resize_pending != 0) {
// There has been a change in frame size.
- vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
- oxcf->scaled_frame_height);
+ av1_set_size_literal(cpi, oxcf->scaled_frame_width,
+ oxcf->scaled_frame_height);
// TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
set_mv_search_params(cpi);
@@ -3851,20 +3851,20 @@
}
if (oxcf->pass == 2) {
- vp10_set_target_rate(cpi);
+ av1_set_target_rate(cpi);
}
alloc_frame_mvs(cm, cm->new_fb_idx);
// Reset the frame pointers to the current frame size.
- if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
+ if (aom_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
alloc_util_frame_buffers(cpi);
@@ -3879,16 +3879,16 @@
if (buf_idx != INVALID_IDX) {
YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
ref_buf->buf = buf;
-#if CONFIG_VP9_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
#else
- vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
- buf->y_crop_height, cm->width,
- cm->height);
-#endif // CONFIG_VP9_HIGHBITDEPTH
- if (vp10_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
+ av1_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+ buf->y_crop_height, cm->width,
+ cm->height);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ if (av1_is_scaled(&ref_buf->sf)) aom_extend_frame_borders(buf);
} else {
ref_buf->buf = NULL;
}
@@ -3897,7 +3897,7 @@
set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
}
-static void reset_use_upsampled_references(VP10_COMP *cpi) {
+static void reset_use_upsampled_references(AV1_COMP *cpi) {
MV_REFERENCE_FRAME ref_frame;
// reset up-sampled reference buffer structure.
@@ -3913,36 +3913,36 @@
}
}
-static void encode_without_recode_loop(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void encode_without_recode_loop(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int q = 0, bottom_index = 0, top_index = 0; // Dummy variables.
const int use_upsampled_ref = cpi->sf.use_upsampled_references;
- vpx_clear_system_state();
+ aom_clear_system_state();
set_frame_size(cpi);
// For 1 pass CBR under dynamic resize mode: use faster scaling for source.
// Only for 2x2 scaling for now.
- if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
+ if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == AOM_CBR &&
cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
cpi->un_scaled_source->y_width == (cm->width << 1) &&
cpi->un_scaled_source->y_height == (cm->height << 1)) {
- cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
- &cpi->scaled_source);
+ cpi->Source = av1_scale_if_required_fast(cm, cpi->un_scaled_source,
+ &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required_fast(
+ cpi->Last_Source = av1_scale_if_required_fast(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
} else {
cpi->Source =
- vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+ av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
- &cpi->scaled_last_source);
+ cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
+ &cpi->scaled_last_source);
}
if (frame_is_intra_only(cm) == 0) {
- vp10_scale_references(cpi);
+ av1_scale_references(cpi);
}
set_size_independent_vars(cpi);
@@ -3954,49 +3954,49 @@
if (!use_upsampled_ref && cpi->sf.use_upsampled_references)
reset_use_upsampled_references(cpi);
- vp10_set_quantizer(cm, q);
- vp10_set_variance_partition_thresholds(cpi, q);
+ av1_set_quantizer(cm, q);
+ av1_set_variance_partition_thresholds(cpi, q);
setup_frame(cpi);
#if CONFIG_ENTROPY
cm->do_subframe_update = cm->tile_cols == 1 && cm->tile_rows == 1;
- vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
- vp10_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
cm->coef_probs_update_idx = 0;
- vp10_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
+ av1_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
#endif // CONFIG_ENTROPY
suppress_active_map(cpi);
// Variance adaptive and in frame q adjustment experiments are mutually
// exclusive.
if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
- vp10_vaq_frame_setup(cpi);
+ av1_vaq_frame_setup(cpi);
} else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
- vp10_setup_in_frame_q_adj(cpi);
+ av1_setup_in_frame_q_adj(cpi);
} else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp10_cyclic_refresh_setup(cpi);
+ av1_cyclic_refresh_setup(cpi);
}
apply_active_map(cpi);
  // Transform / motion compensation: build the reconstruction frame.
- vp10_encode_frame(cpi);
+ av1_encode_frame(cpi);
// Update some stats from cyclic refresh, and check if we should not update
// golden reference, for 1 pass CBR.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
- (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
- vp10_cyclic_refresh_check_golden_update(cpi);
+ (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == AOM_CBR))
+ av1_cyclic_refresh_check_golden_update(cpi);
// Update the skip mb flag probabilities based on the distribution
// seen in the last encoder iteration.
// update_base_skip_probs(cpi);
- vpx_clear_system_state();
+ aom_clear_system_state();
}
-static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
+static void encode_with_recode_loop(AV1_COMP *cpi, size_t *size,
uint8_t *dest) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int bottom_index, top_index;
int loop_count = 0;
@@ -4018,7 +4018,7 @@
reset_use_upsampled_references(cpi);
do {
- vpx_clear_system_state();
+ aom_clear_system_state();
set_frame_size(cpi);
@@ -4043,26 +4043,26 @@
// Decide frame size bounds first time through.
if (loop_count == 0) {
- vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
- &frame_under_shoot_limit,
- &frame_over_shoot_limit);
+ av1_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
+ &frame_under_shoot_limit,
+ &frame_over_shoot_limit);
}
cpi->Source =
- vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+ av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
- &cpi->scaled_last_source);
+ cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
+ &cpi->scaled_last_source);
if (frame_is_intra_only(cm) == 0) {
if (loop_count > 0) {
release_scaled_references(cpi);
}
- vp10_scale_references(cpi);
+ av1_scale_references(cpi);
}
- vp10_set_quantizer(cm, q);
+ av1_set_quantizer(cm, q);
if (loop_count == 0) setup_frame(cpi);
@@ -4071,7 +4071,7 @@
// probs before every iteration.
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
int i;
- vp10_default_coef_probs(cm);
+ av1_default_coef_probs(cm);
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
@@ -4085,39 +4085,38 @@
cm->do_subframe_update = cm->tile_cols == 1 && cm->tile_rows == 1;
if (loop_count == 0 || frame_is_intra_only(cm) ||
cm->error_resilient_mode) {
- vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
- vp10_copy(cpi->subframe_stats.enc_starting_coef_probs,
- cm->fc->coef_probs);
+ av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
} else {
if (cm->do_subframe_update) {
- vp10_copy(cm->fc->coef_probs,
- cpi->subframe_stats.enc_starting_coef_probs);
- vp10_copy(cm->starting_coef_probs,
- cpi->subframe_stats.enc_starting_coef_probs);
- vp10_zero(cpi->subframe_stats.coef_counts_buf);
- vp10_zero(cpi->subframe_stats.eob_counts_buf);
+ av1_copy(cm->fc->coef_probs,
+ cpi->subframe_stats.enc_starting_coef_probs);
+ av1_copy(cm->starting_coef_probs,
+ cpi->subframe_stats.enc_starting_coef_probs);
+ av1_zero(cpi->subframe_stats.coef_counts_buf);
+ av1_zero(cpi->subframe_stats.eob_counts_buf);
}
}
cm->coef_probs_update_idx = 0;
- vp10_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
+ av1_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
#endif // CONFIG_ENTROPY
// Variance adaptive and in frame q adjustment experiments are mutually
// exclusive.
if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
- vp10_vaq_frame_setup(cpi);
+ av1_vaq_frame_setup(cpi);
} else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
- vp10_setup_in_frame_q_adj(cpi);
+ av1_setup_in_frame_q_adj(cpi);
}
  // Transform / motion compensation: build the reconstruction frame.
- vp10_encode_frame(cpi);
+ av1_encode_frame(cpi);
// Update the skip mb flag probabilities based on the distribution
// seen in the last encoder iteration.
// update_base_skip_probs(cpi);
- vpx_clear_system_state();
+ aom_clear_system_state();
// Dummy pack of the bitstream using up to date stats to get an
// accurate estimate of output frame size to determine if we need
@@ -4125,7 +4124,7 @@
if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
save_coding_context(cpi);
- vp10_pack_bitstream(cpi, dest, size);
+ av1_pack_bitstream(cpi, dest, size);
rc->projected_frame_size = (int)(*size) << 3;
restore_coding_context(cpi);
@@ -4133,7 +4132,7 @@
if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
}
- if (cpi->oxcf.rc_mode == VPX_Q) {
+ if (cpi->oxcf.rc_mode == AOM_Q) {
loop = 0;
} else {
if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
@@ -4144,15 +4143,15 @@
int64_t high_err_target = cpi->ambient_err;
int64_t low_err_target = cpi->ambient_err >> 1;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- kf_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ kf_err = aom_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
- kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ kf_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
#else
- kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ kf_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Prevent possible divide by zero error below for perfect KF
kf_err += !kf_err;
@@ -4168,7 +4167,7 @@
// Adjust Q
q = (int)((q * high_err_target) / kf_err);
- q = VPXMIN(q, (q_high + q_low) >> 1);
+ q = AOMMIN(q, (q_high + q_low) >> 1);
} else if (kf_err < low_err_target &&
rc->projected_frame_size >= frame_under_shoot_limit) {
// The key frame is much better than the previous frame
@@ -4177,7 +4176,7 @@
// Adjust Q
q = (int)((q * low_err_target) / kf_err);
- q = VPXMIN(q, (q_high + q_low + 1) >> 1);
+ q = AOMMIN(q, (q_high + q_low + 1) >> 1);
}
// Clamp Q to upper and lower limits:
@@ -4186,7 +4185,7 @@
loop = q != last_q;
} else if (recode_loop_test(cpi, frame_over_shoot_limit,
frame_under_shoot_limit, q,
- VPXMAX(q_high, top_index), bottom_index)) {
+ AOMMAX(q_high, top_index), bottom_index)) {
// Is the projected frame size out of range and are we allowed
// to attempt to recode.
int last_q = q;
@@ -4220,20 +4219,20 @@
if (undershoot_seen || loop_at_this_size > 1) {
// Update rate_correction_factor unless
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low + 1) / 2;
} else {
// Update rate_correction_factor unless
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
- VPXMAX(q_high, top_index));
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ AOMMAX(q_high, top_index));
while (q < q_low && retries < 10) {
- vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
- VPXMAX(q_high, top_index));
+ av1_rc_update_rate_correction_factors(cpi);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ AOMMAX(q_high, top_index));
retries++;
}
}
@@ -4244,24 +4243,24 @@
q_high = q > q_low ? q - 1 : q_low;
if (overshoot_seen || loop_at_this_size > 1) {
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low) / 2;
} else {
- vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
- top_index);
+ av1_rc_update_rate_correction_factors(cpi);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ top_index);
// Special case reset for qlow for constrained quality.
        // This should only trigger when there is very substantial
        // undershoot on a frame and the auto cq level is above
        // the user-passed-in value.
- if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
+ if (cpi->oxcf.rc_mode == AOM_CQ && q < q_low) {
q_low = q;
}
while (q > q_high && retries < 10) {
- vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
- top_index);
+ av1_rc_update_rate_correction_factors(cpi);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ top_index);
retries++;
}
}
@@ -4294,7 +4293,7 @@
} while (loop);
}
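// A toy model of the q search in the recode loop above: overshoot raises
// the lower bound (q + 1), undershoot lowers the upper bound (q - 1),
// and once both directions have been seen the next q is taken from the
// midpoint of the remaining interval. The starting values are illustrative.
#include <stdio.h>

int main(void) {
  int q_low = 0, q_high = 63, q = 32;
  /* Suppose the first attempt overshot and the second undershot. */
  q_low = q < q_high ? q + 1 : q_high; /* overshoot: need a higher q */
  q = (q_high + q_low + 1) / 2;
  q_high = q > q_low ? q - 1 : q_low; /* undershoot: need a lower q */
  q = (q_high + q_low) / 2;
  printf("final q = %d (bounds %d..%d)\n", q, q_low, q_high);
  return 0;
}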
-static int get_ref_frame_flags(const VP10_COMP *cpi) {
+static int get_ref_frame_flags(const AV1_COMP *cpi) {
const int *const map = cpi->common.ref_frame_map;
#if CONFIG_EXT_REFS
@@ -4326,40 +4325,40 @@
const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
#endif // CONFIG_EXT_REFS
- int flags = VPX_REFFRAME_ALL;
+ int flags = AOM_REFFRAME_ALL;
#if CONFIG_EXT_REFS
// Disable the use of BWDREF_FRAME for non-bipredictive frames.
if (!(cpi->rc.is_bipred_frame || cpi->rc.is_last_bipred_frame ||
(cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs)))
- flags &= ~VPX_BWD_FLAG;
+ flags &= ~AOM_BWD_FLAG;
#endif // CONFIG_EXT_REFS
- if (gld_is_last || gld_is_alt) flags &= ~VPX_GOLD_FLAG;
+ if (gld_is_last || gld_is_alt) flags &= ~AOM_GOLD_FLAG;
- if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~VPX_GOLD_FLAG;
+ if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~AOM_GOLD_FLAG;
- if (alt_is_last) flags &= ~VPX_ALT_FLAG;
+ if (alt_is_last) flags &= ~AOM_ALT_FLAG;
#if CONFIG_EXT_REFS
- if (last2_is_last || last2_is_alt) flags &= ~VPX_LAST2_FLAG;
+ if (last2_is_last || last2_is_alt) flags &= ~AOM_LAST2_FLAG;
- if (last3_is_last || last3_is_last2 || last3_is_alt) flags &= ~VPX_LAST3_FLAG;
+ if (last3_is_last || last3_is_last2 || last3_is_alt) flags &= ~AOM_LAST3_FLAG;
- if (gld_is_last2 || gld_is_last3) flags &= ~VPX_GOLD_FLAG;
+ if (gld_is_last2 || gld_is_last3) flags &= ~AOM_GOLD_FLAG;
if ((bwd_is_last || bwd_is_last2 || bwd_is_last3 || bwd_is_gld ||
bwd_is_alt) &&
- (flags & VPX_BWD_FLAG))
- flags &= ~VPX_BWD_FLAG;
+ (flags & AOM_BWD_FLAG))
+ flags &= ~AOM_BWD_FLAG;
#endif // CONFIG_EXT_REFS
return flags;
}
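// A standalone sketch of the duplicate-reference pruning in
// get_ref_frame_flags() above: when two reference slots map to the same
// buffer, the redundant flag is masked out so the same frame is not
// searched twice. The flag values and buffer map are illustrative.
#include <stdio.h>

#define LAST_FLAG 0x1
#define GOLD_FLAG 0x2
#define ALT_FLAG 0x4
#define ALL_FLAGS (LAST_FLAG | GOLD_FLAG | ALT_FLAG)

int main(void) {
  const int map[3] = { 5, 5, 7 }; /* LAST and GOLDEN share buffer 5 */
  int flags = ALL_FLAGS;
  if (map[1] == map[0]) flags &= ~GOLD_FLAG; /* gld_is_last */
  if (map[2] == map[0]) flags &= ~ALT_FLAG;  /* alt_is_last */
  printf("flags = 0x%x\n", flags); /* 0x5: the golden flag is pruned */
  return 0;
}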
-static void set_ext_overrides(VP10_COMP *cpi) {
+static void set_ext_overrides(AV1_COMP *cpi) {
// Overrides the defaults with the externally supplied values with
- // vp10_update_reference() and vp10_update_entropy() calls
+ // av1_update_reference() and av1_update_entropy() calls
// Note: The overrides are valid only for the next frame passed
// to encode_frame_to_data_rate() function
if (cpi->ext_refresh_frame_context_pending) {
@@ -4374,38 +4373,38 @@
}
}
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled) {
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled) {
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
cm->mi_rows * MI_SIZE != unscaled->y_height) {
// For 2x2 scaling down.
- vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
- vpx_extend_frame_borders(scaled);
+ aom_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
+ aom_extend_frame_borders(scaled);
return scaled;
} else {
return unscaled;
}
}
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled) {
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled) {
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
cm->mi_rows * MI_SIZE != unscaled->y_height) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
#else
scale_and_extend_frame_nonnormative(unscaled, scaled);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return scaled;
} else {
return unscaled;
}
}
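// A quick sketch of the size test in av1_scale_if_required() above: the
// coded size is mi_cols/mi_rows units of MI_SIZE pixels, so scaling is
// needed exactly when that differs from the raw source dimensions.
// MI_SIZE below is an illustrative stand-in for the real constant.
#include <stdio.h>

#define MI_SIZE 8 /* illustrative mode-info unit size in pixels */

int main(void) {
  const int mi_cols = 80, mi_rows = 45; /* a 640x360 coded size */
  const int src_w = 640, src_h = 360;
  const int needs_scaling =
      mi_cols * MI_SIZE != src_w || mi_rows * MI_SIZE != src_h;
  printf("needs_scaling = %d\n", needs_scaling); /* 0: sizes match */
  return 0;
}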
-static void set_arf_sign_bias(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_arf_sign_bias(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int arf_sign_bias;
#if CONFIG_EXT_REFS
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
@@ -4430,7 +4429,7 @@
#endif // CONFIG_EXT_REFS
}
-static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
+static int setup_interp_filter_search_mask(AV1_COMP *cpi) {
INTERP_FILTER ifilter;
int ref_total[TOTAL_REFS_PER_FRAME] = { 0 };
MV_REFERENCE_FRAME ref;
@@ -4490,8 +4489,8 @@
#if DUMP_RECON_FRAMES == 1
// NOTE(zoeliu): For debug - Output the filtered reconstructed video.
-static void dump_filtered_recon_frames(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void dump_filtered_recon_frames(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const YV12_BUFFER_CONFIG *recon_buf = cm->frame_to_show;
int h;
char file_name[256] = "/tmp/enc_filtered_recon.yuv";
@@ -4542,15 +4541,15 @@
}
#endif // DUMP_RECON_FRAMES
-static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
+static void encode_frame_to_data_rate(AV1_COMP *cpi, size_t *size,
uint8_t *dest,
unsigned int *frame_flags) {
- VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
struct segmentation *const seg = &cm->seg;
TX_SIZE t;
set_ext_overrides(cpi);
- vpx_clear_system_state();
+ aom_clear_system_state();
// Set the arf sign bias for this frame.
set_arf_sign_bias(cpi);
@@ -4585,7 +4584,7 @@
cpi->rc.is_bipred_frame = 0;
// Build the bitstream
- vp10_pack_bitstream(cpi, dest, size);
+ av1_pack_bitstream(cpi, dest, size);
// Set up frame to show to get ready for stats collection.
cm->frame_to_show = get_frame_new_buffer(cm);
@@ -4596,7 +4595,7 @@
#endif // DUMP_RECON_FRAMES
// Update the LAST_FRAME in the reference frame buffer.
- vp10_update_reference_frames(cpi);
+ av1_update_reference_frames(cpi);
// Update frame flags
cpi->frame_flags &= ~FRAMEFLAGS_GOLDEN;
@@ -4612,8 +4611,8 @@
// Since we allocate a spot for the OVERLAY frame in the gf group, we need
// to do post-encoding update accordingly.
if (cpi->rc.is_src_frame_alt_ref) {
- vp10_set_target_rate(cpi);
- vp10_rc_postencode_update(cpi, *size);
+ av1_set_target_rate(cpi);
+ av1_rc_postencode_update(cpi, *size);
}
#endif
@@ -4635,7 +4634,7 @@
// Set various flags etc to special state if it is a key frame.
if (frame_is_intra_only(cm)) {
// Reset the loop filter deltas and segmentation map.
- vp10_reset_segment_features(cm);
+ av1_reset_segment_features(cm);
    // If segmentation is enabled, force a map update for key frames.
if (seg->enabled) {
@@ -4660,16 +4659,16 @@
// For 1 pass CBR, check if we are dropping this frame.
// Never drop on key frame.
- if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
+ if (oxcf->pass == 0 && oxcf->rc_mode == AOM_CBR &&
cm->frame_type != KEY_FRAME) {
- if (vp10_rc_drop_frame(cpi)) {
- vp10_rc_postencode_update_drop_frame(cpi);
+ if (av1_rc_drop_frame(cpi)) {
+ av1_rc_postencode_update_drop_frame(cpi);
++cm->current_video_frame;
return;
}
}
- vpx_clear_system_state();
+ aom_clear_system_state();
#if CONFIG_INTERNAL_STATS
memset(cpi->mode_chosen_counts, 0,
@@ -4684,7 +4683,7 @@
#ifdef OUTPUT_YUV_SKINMAP
if (cpi->common.current_video_frame > 1) {
- vp10_compute_skin_map(cpi, yuv_skinmap_file);
+ av1_compute_skin_map(cpi, yuv_skinmap_file);
}
#endif // OUTPUT_YUV_SKINMAP
@@ -4692,16 +4691,16 @@
// fixed interval. Note the reconstruction error if it is the frame before
  // the forced key frame
if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
cpi->ambient_err =
- vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ aom_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
- cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ cpi->ambient_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
#else
- cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ cpi->ambient_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
// If the encoder forced a KEY_FRAME decision
@@ -4724,7 +4723,7 @@
loopfilter_frame(cpi, cm);
// Build the bitstream
- vp10_pack_bitstream(cpi, dest, size);
+ av1_pack_bitstream(cpi, dest, size);
#if DUMP_RECON_FRAMES == 1
// NOTE(zoeliu): For debug - Output the filtered reconstructed video.
@@ -4737,24 +4736,24 @@
release_scaled_references(cpi);
}
- vp10_update_reference_frames(cpi);
+ av1_update_reference_frames(cpi);
for (t = TX_4X4; t <= TX_32X32; t++)
- vp10_full_to_model_counts(cpi->td.counts->coef[t],
- cpi->td.rd_counts.coef_counts[t]);
+ av1_full_to_model_counts(cpi->td.counts->coef[t],
+ cpi->td.rd_counts.coef_counts[t]);
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
#if CONFIG_ENTROPY
cm->partial_prob_update = 0;
#endif // CONFIG_ENTROPY
- vp10_adapt_coef_probs(cm);
- vp10_adapt_intra_frame_probs(cm);
+ av1_adapt_coef_probs(cm);
+ av1_adapt_intra_frame_probs(cm);
}
if (!frame_is_intra_only(cm)) {
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_adapt_inter_frame_probs(cm);
- vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+ av1_adapt_inter_frame_probs(cm);
+ av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
}
}
@@ -4785,7 +4784,7 @@
#endif // CONFIG_EXT_REFS
cm->last_frame_type = cm->frame_type;
- vp10_rc_postencode_update(cpi, *size);
+ av1_rc_postencode_update(cpi, *size);
#if 0
output_frame_level_debug_stats(cpi);
@@ -4816,7 +4815,7 @@
  // TODO(zoeliu): We may only swap mi and prev_mi for those frames that are
// being used as reference.
#endif // CONFIG_EXT_REFS
- vp10_swap_mi_and_prev_mi(cm);
+ av1_swap_mi_and_prev_mi(cm);
  // Don't increment frame counters if this was an altref buffer
  // update, not a real frame
++cm->current_video_frame;
@@ -4829,17 +4828,17 @@
cm->prev_frame = cm->cur_frame;
}
-static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass0Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
unsigned int *frame_flags) {
- if (cpi->oxcf.rc_mode == VPX_CBR) {
- vp10_rc_get_one_pass_cbr_params(cpi);
+ if (cpi->oxcf.rc_mode == AOM_CBR) {
+ av1_rc_get_one_pass_cbr_params(cpi);
} else {
- vp10_rc_get_one_pass_vbr_params(cpi);
+ av1_rc_get_one_pass_vbr_params(cpi);
}
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
}
-static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass2Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
unsigned int *frame_flags) {
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
@@ -4850,15 +4849,15 @@
// a gf group, but note that an OVERLAY frame always has a spot in a gf group,
// even when show_existing_frame is used.
if (!cpi->common.show_existing_frame || cpi->rc.is_src_frame_alt_ref) {
- vp10_twopass_postencode_update(cpi);
+ av1_twopass_postencode_update(cpi);
}
check_show_existing_frame(cpi);
#else
- vp10_twopass_postencode_update(cpi);
+ av1_twopass_postencode_update(cpi);
#endif // CONFIG_EXT_REFS
}
-static void init_ref_frame_bufs(VP10_COMMON *cm) {
+static void init_ref_frame_bufs(AV1_COMMON *cm) {
int i;
BufferPool *const pool = cm->buffer_pool;
cm->new_fb_idx = INVALID_IDX;
@@ -4868,22 +4867,22 @@
}
}
-static void check_initial_width(VP10_COMP *cpi,
-#if CONFIG_VP9_HIGHBITDEPTH
+static void check_initial_width(AV1_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
int subsampling_x, int subsampling_y) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
if (!cpi->initial_width ||
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth != use_highbitdepth ||
#endif
cm->subsampling_x != subsampling_x ||
cm->subsampling_y != subsampling_y) {
cm->subsampling_x = subsampling_x;
cm->subsampling_y = subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = use_highbitdepth;
#endif
@@ -4899,44 +4898,44 @@
}
}
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
- int64_t end_time) {
- VP10_COMMON *const cm = &cpi->common;
- struct vpx_usec_timer timer;
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time) {
+ AV1_COMMON *const cm = &cpi->common;
+ struct aom_usec_timer timer;
int res = 0;
const int subsampling_x = sd->subsampling_x;
const int subsampling_y = sd->subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int use_highbitdepth = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0;
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
#else
check_initial_width(cpi, subsampling_x, subsampling_y);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
- vpx_usec_timer_start(&timer);
+ aom_usec_timer_start(&timer);
- if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
-#if CONFIG_VP9_HIGHBITDEPTH
- use_highbitdepth,
-#endif // CONFIG_VP9_HIGHBITDEPTH
- frame_flags))
+ if (av1_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+#if CONFIG_AOM_HIGHBITDEPTH
+ use_highbitdepth,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ frame_flags))
res = -1;
- vpx_usec_timer_mark(&timer);
- cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+ aom_usec_timer_mark(&timer);
+ cpi->time_receive_data += aom_usec_timer_elapsed(&timer);
if ((cm->profile == PROFILE_0 || cm->profile == PROFILE_2) &&
(subsampling_x != 1 || subsampling_y != 1)) {
- vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
+ aom_internal_error(&cm->error, AOM_CODEC_INVALID_PARAM,
"Non-4:2:0 color format requires profile 1 or 3");
res = -1;
}
if ((cm->profile == PROFILE_1 || cm->profile == PROFILE_3) &&
(subsampling_x == 1 && subsampling_y == 1)) {
- vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
+ aom_internal_error(&cm->error, AOM_CODEC_INVALID_PARAM,
"4:2:0 color format requires profile 0 or 2");
res = -1;
}
@@ -4944,8 +4943,8 @@
return res;
}
-static int frame_is_reference(const VP10_COMP *cpi) {
- const VP10_COMMON *cm = &cpi->common;
+static int frame_is_reference(const AV1_COMP *cpi) {
+ const AV1_COMMON *cm = &cpi->common;
return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
cpi->refresh_golden_frame ||
@@ -4957,7 +4956,7 @@
cm->seg.update_data;
}
-static void adjust_frame_rate(VP10_COMP *cpi,
+static void adjust_frame_rate(AV1_COMP *cpi,
const struct lookahead_entry *source) {
int64_t this_duration;
int step = 0;
@@ -4978,18 +4977,18 @@
if (this_duration) {
if (step) {
- vp10_new_framerate(cpi, 10000000.0 / this_duration);
+ av1_new_framerate(cpi, 10000000.0 / this_duration);
} else {
// Average this frame's rate into the last second's average
// frame rate. If we haven't seen 1 second yet, then average
// over the whole interval seen.
- const double interval = VPXMIN(
+ const double interval = AOMMIN(
(double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0);
double avg_duration = 10000000.0 / cpi->framerate;
avg_duration *= (interval - avg_duration + this_duration);
avg_duration /= interval;
- vp10_new_framerate(cpi, 10000000.0 / avg_duration);
+ av1_new_framerate(cpi, 10000000.0 / avg_duration);
}
}
cpi->last_time_stamp_seen = source->ts_start;
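// A standalone sketch of the framerate smoothing in adjust_frame_rate()
// above: the new frame's duration is folded into a running average over
// at most the last second, with all times in 1/10,000,000-second units.
// The frame rates used here are illustrative.
#include <stdio.h>

int main(void) {
  const double interval = 10000000.0;             /* capped at one second */
  const double this_duration = 10000000.0 / 24.0; /* one slow frame */
  double avg_duration = 10000000.0 / 30.0;        /* 30 fps so far */
  avg_duration *= (interval - avg_duration + this_duration);
  avg_duration /= interval;
  printf("smoothed fps = %.2f\n", 10000000.0 / avg_duration);
  return 0;
}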
@@ -4998,7 +4997,7 @@
// Returns 0 if this is not an alt ref, else the offset of the source frame
// used as the arf midpoint.
-static int get_arf_src_index(VP10_COMP *cpi) {
+static int get_arf_src_index(AV1_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
int arf_src_index = 0;
if (is_altref_enabled(cpi)) {
@@ -5015,7 +5014,7 @@
}
#if CONFIG_EXT_REFS
-static int get_brf_src_index(VP10_COMP *cpi) {
+static int get_brf_src_index(AV1_COMP *cpi) {
int brf_src_index = 0;
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
@@ -5035,12 +5034,12 @@
}
#endif // CONFIG_EXT_REFS
-static void check_src_altref(VP10_COMP *cpi,
+static void check_src_altref(AV1_COMP *cpi,
const struct lookahead_entry *source) {
RATE_CONTROL *const rc = &cpi->rc;
// If pass == 2, the parameters set here will be reset in
- // vp10_rc_get_second_pass_params()
+ // av1_rc_get_second_pass_params()
if (cpi->oxcf.pass == 2) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
@@ -5065,9 +5064,9 @@
}
#if CONFIG_INTERNAL_STATS
-extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
- const unsigned char *img2, int img2_pitch,
- int width, int height);
+extern double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
+ const unsigned char *img2, int img2_pitch,
+ int width, int height);
static void adjust_image_stat(double y, double u, double v, double all,
ImageStat *s) {
@@ -5075,16 +5074,16 @@
s->stat[U] += u;
s->stat[V] += v;
s->stat[ALL] += all;
- s->worst = VPXMIN(s->worst, all);
+ s->worst = AOMMIN(s->worst, all);
}
-static void compute_internal_stats(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void compute_internal_stats(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
double samples = 0.0;
uint32_t in_bit_depth = 8;
uint32_t bit_depth = 8;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
in_bit_depth = cpi->oxcf.input_bit_depth;
bit_depth = cm->bit_depth;
@@ -5099,13 +5098,13 @@
if (cpi->b_calculate_psnr) {
PSNR_STATS psnr;
double frame_ssim2 = 0.0, weight = 0.0;
- vpx_clear_system_state();
+ aom_clear_system_state();
// TODO(yaowu): unify these two versions into one.
-#if CONFIG_VP9_HIGHBITDEPTH
- vpx_calc_highbd_psnr(orig, recon, &psnr, bit_depth, in_bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
+ aom_calc_highbd_psnr(orig, recon, &psnr, bit_depth, in_bit_depth);
#else
- vpx_calc_psnr(orig, recon, &psnr);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ aom_calc_psnr(orig, recon, &psnr);
+#endif // CONFIG_AOM_HIGHBITDEPTH
adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3], psnr.psnr[0],
&cpi->psnr);
@@ -5113,17 +5112,17 @@
cpi->total_samples += psnr.samples[0];
samples = psnr.samples[0];
// TODO(yaowu): unify these two versions into one.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth)
frame_ssim2 =
- vpx_highbd_calc_ssim(orig, recon, &weight, bit_depth, in_bit_depth);
+ aom_highbd_calc_ssim(orig, recon, &weight, bit_depth, in_bit_depth);
else
- frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
+ frame_ssim2 = aom_calc_ssim(orig, recon, &weight);
#else
- frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ frame_ssim2 = aom_calc_ssim(orig, recon, &weight);
+#endif // CONFIG_AOM_HIGHBITDEPTH
- cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
+ cpi->worst_ssim = AOMMIN(cpi->worst_ssim, frame_ssim2);
cpi->summed_quality += frame_ssim2 * weight;
cpi->summed_weights += weight;
@@ -5138,54 +5137,54 @@
#endif
}
if (cpi->b_calculate_blockiness) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (!cm->use_highbitdepth)
#endif
{
const double frame_blockiness =
- vp10_get_blockiness(orig->y_buffer, orig->y_stride, recon->y_buffer,
- recon->y_stride, orig->y_width, orig->y_height);
- cpi->worst_blockiness = VPXMAX(cpi->worst_blockiness, frame_blockiness);
+ av1_get_blockiness(orig->y_buffer, orig->y_stride, recon->y_buffer,
+ recon->y_stride, orig->y_width, orig->y_height);
+ cpi->worst_blockiness = AOMMAX(cpi->worst_blockiness, frame_blockiness);
cpi->total_blockiness += frame_blockiness;
}
if (cpi->b_calculate_consistency) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (!cm->use_highbitdepth)
#endif
{
- const double this_inconsistency = vpx_get_ssim_metrics(
+ const double this_inconsistency = aom_get_ssim_metrics(
orig->y_buffer, orig->y_stride, recon->y_buffer, recon->y_stride,
orig->y_width, orig->y_height, cpi->ssim_vars, &cpi->metrics, 1);
const double peak = (double)((1 << in_bit_depth) - 1);
const double consistency =
- vpx_sse_to_psnr(samples, peak, cpi->total_inconsistency);
+ aom_sse_to_psnr(samples, peak, cpi->total_inconsistency);
if (consistency > 0.0)
cpi->worst_consistency =
- VPXMIN(cpi->worst_consistency, consistency);
+ AOMMIN(cpi->worst_consistency, consistency);
cpi->total_inconsistency += this_inconsistency;
}
}
}
frame_all =
- vpx_calc_fastssim(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
+ aom_calc_fastssim(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
- frame_all = vpx_psnrhvs(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
+ frame_all = aom_psnrhvs(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs);
}
}
#endif // CONFIG_INTERNAL_STATS
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
- size_t *size, uint8_t *dest, int64_t *time_stamp,
- int64_t *time_end, int flush) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
- VP10_COMMON *const cm = &cpi->common;
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
+ size_t *size, uint8_t *dest, int64_t *time_stamp,
+ int64_t *time_end, int flush) {
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
- struct vpx_usec_timer cmptimer;
+ struct aom_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
struct lookahead_entry *last_source = NULL;
struct lookahead_entry *source = NULL;
@@ -5201,9 +5200,9 @@
bitstream_queue_record_write();
#endif
- vpx_usec_timer_start(&cmptimer);
+ aom_usec_timer_start(&cmptimer);
- vp10_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+ av1_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
// Check whether multi-arf is enabled.
// Note that at the moment multi_arf is only configured for two-pass VBR
@@ -5230,7 +5229,7 @@
if (oxcf->pass == 2 && cm->show_existing_frame) {
// Manage the source buffer and flush out the source frame that has been
// coded already; also prepare for PSNR calculation if needed.
- if ((source = vp10_lookahead_pop(cpi->lookahead, flush)) == NULL) {
+ if ((source = av1_lookahead_pop(cpi->lookahead, flush)) == NULL) {
*size = 0;
return -1;
}
@@ -5255,14 +5254,14 @@
if (cm->new_fb_idx == INVALID_IDX) return -1;
// Clear down mmx registers
- vpx_clear_system_state();
+ aom_clear_system_state();
// Start with a 0 size frame.
*size = 0;
// We need to update the gf_group for the show_existing overlay frame
if (cpi->rc.is_src_frame_alt_ref) {
- vp10_rc_get_second_pass_params(cpi);
+ av1_rc_get_second_pass_params(cpi);
}
Pass2Encode(cpi, size, dest, frame_flags);
@@ -5275,7 +5274,7 @@
#endif // CONFIG_INTERNAL_STATS
// Clear down mmx registers
- vpx_clear_system_state();
+ aom_clear_system_state();
cm->show_existing_frame = 0;
return 0;
@@ -5286,11 +5285,11 @@
arf_src_index = get_arf_src_index(cpi);
if (arf_src_index) {
for (i = 0; i <= arf_src_index; ++i) {
- struct lookahead_entry *e = vp10_lookahead_peek(cpi->lookahead, i);
+ struct lookahead_entry *e = av1_lookahead_peek(cpi->lookahead, i);
// Avoid creating an alt-ref if there's a forced keyframe pending.
if (e == NULL) {
break;
- } else if (e->flags == VPX_EFLAG_FORCE_KF) {
+ } else if (e->flags == AOM_EFLAG_FORCE_KF) {
arf_src_index = 0;
flush = 1;
break;
@@ -5301,13 +5300,13 @@
if (arf_src_index) {
assert(arf_src_index <= rc->frames_to_key);
- if ((source = vp10_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
+ if ((source = av1_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
cpi->alt_ref_source = source;
if (oxcf->arnr_max_frames > 0) {
// Produce the filtered ARF frame.
- vp10_temporal_filter(cpi, arf_src_index);
- vpx_extend_frame_borders(&cpi->alt_ref_buffer);
+ av1_temporal_filter(cpi, arf_src_index);
+ aom_extend_frame_borders(&cpi->alt_ref_buffer);
force_src_buffer = &cpi->alt_ref_buffer;
}
@@ -5326,7 +5325,7 @@
brf_src_index = get_brf_src_index(cpi);
if (brf_src_index) {
assert(brf_src_index <= rc->frames_to_key);
- if ((source = vp10_lookahead_peek(cpi->lookahead, brf_src_index)) != NULL) {
+ if ((source = av1_lookahead_peek(cpi->lookahead, brf_src_index)) != NULL) {
cm->show_frame = 0;
cm->intra_only = 0;
@@ -5343,12 +5342,12 @@
if (!source) {
// Get last frame source.
if (cm->current_video_frame > 0) {
- if ((last_source = vp10_lookahead_peek(cpi->lookahead, -1)) == NULL)
+ if ((last_source = av1_lookahead_peek(cpi->lookahead, -1)) == NULL)
return -1;
}
// Read in the source frame.
- source = vp10_lookahead_pop(cpi->lookahead, flush);
+ source = av1_lookahead_pop(cpi->lookahead, flush);
if (source != NULL) {
cm->show_frame = 1;
@@ -5367,12 +5366,12 @@
*time_stamp = source->ts_start;
*time_end = source->ts_end;
- *frame_flags = (source->flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+ *frame_flags = (source->flags & AOM_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
} else {
*size = 0;
if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
- vp10_end_first_pass(cpi); /* get last stats packet */
+ av1_end_first_pass(cpi); /* get last stats packet */
cpi->twopass.first_pass_done = 1;
}
return -1;
@@ -5384,7 +5383,7 @@
}
// Clear down mmx registers
- vpx_clear_system_state();
+ aom_clear_system_state();
// Adjust frame rates based on the timestamps given
if (cm->show_frame) adjust_frame_rate(cpi, source);
@@ -5421,7 +5420,7 @@
cpi->frame_flags = *frame_flags;
if (oxcf->pass == 2) {
- vp10_rc_get_second_pass_params(cpi);
+ av1_rc_get_second_pass_params(cpi);
} else if (oxcf->pass == 1) {
set_frame_size(cpi);
}
@@ -5439,7 +5438,7 @@
if (oxcf->pass == 1) {
cpi->td.mb.e_mbd.lossless[0] = is_lossless_requested(oxcf);
- vp10_first_pass(cpi, source);
+ av1_first_pass(cpi, source);
} else if (oxcf->pass == 2) {
Pass2Encode(cpi, size, dest, frame_flags);
} else {
@@ -5459,8 +5458,8 @@
cpi->droppable = !frame_is_reference(cpi);
}
- vpx_usec_timer_mark(&cmptimer);
- cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+ aom_usec_timer_mark(&cmptimer);
+ cpi->time_compress_data += aom_usec_timer_elapsed(&cmptimer);
if (cpi->b_calculate_psnr && oxcf->pass != 1 && cm->show_frame)
generate_psnr_packet(cpi);
@@ -5472,13 +5471,13 @@
}
#endif // CONFIG_INTERNAL_STATS
- vpx_clear_system_state();
+ aom_clear_system_state();
return 0;
}
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
- VP10_COMMON *cm = &cpi->common;
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
+ AV1_COMMON *cm = &cpi->common;
if (!cm->show_frame) {
return -1;
} else {
@@ -5493,12 +5492,12 @@
} else {
ret = -1;
}
- vpx_clear_system_state();
+ aom_clear_system_state();
return ret;
}
}
-int vp10_get_last_show_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *frame) {
+int av1_get_last_show_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *frame) {
if (cpi->last_show_frame_buf_idx == INVALID_IDX) return -1;
*frame =
@@ -5506,9 +5505,9 @@
return 0;
}
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
- VPX_SCALING vert_mode) {
- VP10_COMMON *cm = &cpi->common;
+int av1_set_internal_size(AV1_COMP *cpi, AOM_SCALING horiz_mode,
+ AOM_SCALING vert_mode) {
+ AV1_COMMON *cm = &cpi->common;
int hr = 0, hs = 0, vr = 0, vs = 0;
if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
@@ -5527,14 +5526,14 @@
return 0;
}
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
- unsigned int height) {
- VP10_COMMON *cm = &cpi->common;
-#if CONFIG_VP9_HIGHBITDEPTH
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
+ unsigned int height) {
+ AV1_COMMON *cm = &cpi->common;
+#if CONFIG_AOM_HIGHBITDEPTH
check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
#else
check_initial_width(cpi, 1, 1);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (width) {
cm->width = width;
@@ -5559,49 +5558,49 @@
return 0;
}
-int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
+int av1_get_quantizer(AV1_COMP *cpi) { return cpi->common.base_qindex; }
-void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags) {
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags) {
if (flags &
- (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
- int ref = VPX_REFFRAME_ALL;
+ (AOM_EFLAG_NO_REF_LAST | AOM_EFLAG_NO_REF_GF | AOM_EFLAG_NO_REF_ARF)) {
+ int ref = AOM_REFFRAME_ALL;
- if (flags & VP8_EFLAG_NO_REF_LAST) {
- ref ^= VPX_LAST_FLAG;
+ if (flags & AOM_EFLAG_NO_REF_LAST) {
+ ref ^= AOM_LAST_FLAG;
#if CONFIG_EXT_REFS
- ref ^= VPX_LAST2_FLAG;
- ref ^= VPX_LAST3_FLAG;
+ ref ^= AOM_LAST2_FLAG;
+ ref ^= AOM_LAST3_FLAG;
#endif // CONFIG_EXT_REFS
}
- if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VPX_GOLD_FLAG;
+ if (flags & AOM_EFLAG_NO_REF_GF) ref ^= AOM_GOLD_FLAG;
- if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
+ if (flags & AOM_EFLAG_NO_REF_ARF) ref ^= AOM_ALT_FLAG;
- vp10_use_as_reference(cpi, ref);
+ av1_use_as_reference(cpi, ref);
}
if (flags &
- (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
- int upd = VPX_REFFRAME_ALL;
+ (AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF | AOM_EFLAG_NO_UPD_ARF |
+ AOM_EFLAG_FORCE_GF | AOM_EFLAG_FORCE_ARF)) {
+ int upd = AOM_REFFRAME_ALL;
- if (flags & VP8_EFLAG_NO_UPD_LAST) {
- upd ^= VPX_LAST_FLAG;
+ if (flags & AOM_EFLAG_NO_UPD_LAST) {
+ upd ^= AOM_LAST_FLAG;
#if CONFIG_EXT_REFS
- upd ^= VPX_LAST2_FLAG;
- upd ^= VPX_LAST3_FLAG;
+ upd ^= AOM_LAST2_FLAG;
+ upd ^= AOM_LAST3_FLAG;
#endif // CONFIG_EXT_REFS
}
- if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VPX_GOLD_FLAG;
+ if (flags & AOM_EFLAG_NO_UPD_GF) upd ^= AOM_GOLD_FLAG;
- if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
+ if (flags & AOM_EFLAG_NO_UPD_ARF) upd ^= AOM_ALT_FLAG;
- vp10_update_reference(cpi, upd);
+ av1_update_reference(cpi, upd);
}
- if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
- vp10_update_entropy(cpi, 0);
+ if (flags & AOM_EFLAG_NO_UPD_ENTROPY) {
+ av1_update_entropy(cpi, 0);
}
}
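A minimal sketch of the reference-mask pattern used by av1_apply_encoding_flags() above: start from the all-references mask and XOR out each disallowed reference. The SK_* constants are illustrative stand-ins, not the real flag values from the public headers:

enum {
  SK_LAST_FLAG = 1 << 0,
  SK_GOLD_FLAG = 1 << 1,
  SK_ALT_FLAG = 1 << 2,
  SK_REFFRAME_ALL = SK_LAST_FLAG | SK_GOLD_FLAG | SK_ALT_FLAG
};

static int ref_mask_from_flags(int no_last, int no_gf, int no_arf) {
  int ref = SK_REFFRAME_ALL;
  /* Every bit starts out set, so XOR here is equivalent to clearing. */
  if (no_last) ref ^= SK_LAST_FLAG;
  if (no_gf) ref ^= SK_GOLD_FLAG;
  if (no_arf) ref ^= SK_ALT_FLAG;
  return ref; /* e.g. (1, 0, 0) leaves SK_GOLD_FLAG | SK_ALT_FLAG */
}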
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 719615b..821d2f1 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_ENCODER_H_
-#define VP10_ENCODER_ENCODER_H_
+#ifndef AV1_ENCODER_ENCODER_H_
+#define AV1_ENCODER_ENCODER_H_
#include <stdio.h>
-#include "./vpx_config.h"
-#include "aom/vp8cx.h"
+#include "./aom_config.h"
+#include "aom/aomcx.h"
#include "av1/common/alloccommon.h"
#include "av1/common/entropymode.h"
@@ -41,8 +41,8 @@
#include "aom_dsp/ssim.h"
#endif
#include "aom_dsp/variance.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom_util/vpx_thread.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom_util/aom_thread.h"
#ifdef __cplusplus
extern "C" {
@@ -100,7 +100,7 @@
FOURFIVE = 1,
THREEFIVE = 2,
ONETWO = 3
-} VPX_SCALING;
+} AOM_SCALING;
typedef enum {
// Good Quality Fast Encoding. The encoder balances quality with the amount of
@@ -143,9 +143,9 @@
RESIZE_DYNAMIC = 2 // Coded size of each frame is determined by the codec.
} RESIZE_TYPE;
-typedef struct VP10EncoderConfig {
+typedef struct AV1EncoderConfig {
BITSTREAM_PROFILE profile;
- vpx_bit_depth_t bit_depth; // Codec bit-depth.
+ aom_bit_depth_t bit_depth; // Codec bit-depth.
int width; // width of data passed to the compressor
int height; // height of data passed to the compressor
unsigned int input_bit_depth; // Input bit depth.
@@ -175,7 +175,7 @@
// DATARATE CONTROL OPTIONS
// vbr, cbr, constrained quality or constant quality
- enum vpx_rc_mode rc_mode;
+ enum aom_rc_mode rc_mode;
// buffer targeting aggressiveness
int under_shoot_pct;
@@ -246,29 +246,29 @@
int max_threads;
- vpx_fixed_buf_t two_pass_stats_in;
- struct vpx_codec_pkt_list *output_pkt_list;
+ aom_fixed_buf_t two_pass_stats_in;
+ struct aom_codec_pkt_list *output_pkt_list;
#if CONFIG_FP_MB_STATS
- vpx_fixed_buf_t firstpass_mb_stats_in;
+ aom_fixed_buf_t firstpass_mb_stats_in;
#endif
- vpx_tune_metric tuning;
- vpx_tune_content content;
-#if CONFIG_VP9_HIGHBITDEPTH
+ aom_tune_metric tuning;
+ aom_tune_content content;
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth;
#endif
- vpx_color_space_t color_space;
+ aom_color_space_t color_space;
int color_range;
int render_width;
int render_height;
#if CONFIG_EXT_PARTITION
- vpx_superblock_size_t superblock_size;
+ aom_superblock_size_t superblock_size;
#endif // CONFIG_EXT_PARTITION
-} VP10EncoderConfig;
+} AV1EncoderConfig;
-static INLINE int is_lossless_requested(const VP10EncoderConfig *cfg) {
+static INLINE int is_lossless_requested(const AV1EncoderConfig *cfg) {
return cfg->best_allowed_q == 0 && cfg->worst_allowed_q == 0;
}
@@ -280,7 +280,7 @@
} TileDataEnc;
typedef struct RD_COUNTS {
- vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
+ av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
int64_t comp_pred_diff[REFERENCE_MODES];
int m_search_count;
int ex_search_count;
@@ -321,11 +321,11 @@
#if CONFIG_ENTROPY
typedef struct SUBFRAME_STATS {
- vp10_coeff_probs_model coef_probs_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
- vp10_coeff_count coef_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+ av1_coeff_probs_model coef_probs_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+ av1_coeff_count coef_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
unsigned int eob_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES][REF_TYPES]
[COEF_BANDS][COEFF_CONTEXTS];
- vp10_coeff_probs_model enc_starting_coef_probs[TX_SIZES][PLANE_TYPES];
+ av1_coeff_probs_model enc_starting_coef_probs[TX_SIZES][PLANE_TYPES];
} SUBFRAME_STATS;
#endif // CONFIG_ENTROPY
@@ -334,7 +334,7 @@
size_t size;
} TileBufferEnc;
-typedef struct VP10_COMP {
+typedef struct AV1_COMP {
QUANTS quants;
ThreadData td;
MB_MODE_INFO_EXT *mbmi_ext_base;
@@ -346,8 +346,8 @@
DECLARE_ALIGNED(16, dequant_val_type_nuq,
uv_dequant_val_nuq[QUANT_PROFILES][QINDEX_RANGE][COEF_BANDS]);
#endif // CONFIG_NEW_QUANT
- VP10_COMMON common;
- VP10EncoderConfig oxcf;
+ AV1_COMMON common;
+ AV1EncoderConfig oxcf;
struct lookahead_ctx *lookahead;
struct lookahead_entry *alt_ref_source;
@@ -431,7 +431,7 @@
// sufficient space to the size of the maximum possible number of frames.
int interp_filter_selected[REF_FRAMES + 1][SWITCHABLE];
- struct vpx_codec_pkt_list *output_pkt_list;
+ struct aom_codec_pkt_list *output_pkt_list;
MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
int mbgraph_n_frames; // number of frames filled in the above
@@ -461,9 +461,9 @@
ActiveMap active_map;
fractional_mv_step_fp *find_fractional_mv_step;
- vp10_full_search_fn_t full_search_sad; // It is currently unused.
- vp10_diamond_search_fn_t diamond_search_sad;
- vpx_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
+ av1_full_search_fn_t full_search_sad; // It is currently unused.
+ av1_diamond_search_fn_t diamond_search_sad;
+ aom_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
uint64_t time_receive_data;
uint64_t time_compress_data;
uint64_t time_pick_lpf;
@@ -609,14 +609,14 @@
// Multi-threading
int num_workers;
- VPxWorker *workers;
+ AVxWorker *workers;
struct EncWorkerData *tile_thr_data;
- VP10LfSync lf_row_sync;
+ AV1LfSync lf_row_sync;
#if CONFIG_ENTROPY
SUBFRAME_STATS subframe_stats;
// TODO(yaowu): minimize the size of count buffers
SUBFRAME_STATS wholeframe_stats;
- vp10_coeff_stats branch_ct_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+ av1_coeff_stats branch_ct_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
#endif // CONFIG_ENTROPY
#if CONFIG_ANS
struct BufAnsCoder buf_ans;
@@ -631,63 +631,63 @@
#if CONFIG_GLOBAL_MOTION
int global_motion_used[TOTAL_REFS_PER_FRAME];
#endif
-} VP10_COMP;
+} AV1_COMP;
-void vp10_initialize_enc(void);
+void av1_initialize_enc(void);
-struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
- BufferPool *const pool);
-void vp10_remove_compressor(VP10_COMP *cpi);
+struct AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
+ BufferPool *const pool);
+void av1_remove_compressor(AV1_COMP *cpi);
-void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
+void av1_change_config(AV1_COMP *cpi, const AV1EncoderConfig *oxcf);
// Receive a frame's worth of data. The caller can assume that a copy of this
// frame is made and not just a copy of the pointer.
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
- int64_t end_time_stamp);
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time_stamp);
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
- size_t *size, uint8_t *dest, int64_t *time_stamp,
- int64_t *time_end, int flush);
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
+ size_t *size, uint8_t *dest, int64_t *time_stamp,
+ int64_t *time_end, int flush);
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest);
-int vp10_get_last_show_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *frame);
+int av1_get_last_show_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *frame);
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags);
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags);
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags);
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
-
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-int vp10_update_entropy(VP10_COMP *cpi, int update);
+int av1_set_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_update_entropy(AV1_COMP *cpi, int update);
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
- VPX_SCALING vert_mode);
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
- unsigned int height);
+int av1_set_internal_size(AV1_COMP *cpi, AOM_SCALING horiz_mode,
+ AOM_SCALING vert_mode);
-int vp10_get_quantizer(struct VP10_COMP *cpi);
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
+ unsigned int height);
-void vp10_full_to_model_counts(vp10_coeff_count_model *model_count,
- vp10_coeff_count *full_count);
+int av1_get_quantizer(struct AV1_COMP *cpi);
-static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
+void av1_full_to_model_counts(av1_coeff_count_model *model_count,
+ av1_coeff_count *full_count);
+
+static INLINE int frame_is_kf_gf_arf(const AV1_COMP *cpi) {
return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
}
-static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi,
+static INLINE int get_ref_frame_map_idx(const AV1_COMP *cpi,
MV_REFERENCE_FRAME ref_frame) {
#if CONFIG_EXT_REFS
if (ref_frame >= LAST_FRAME && ref_frame <= LAST3_FRAME)
@@ -705,23 +705,23 @@
return cpi->alt_fb_idx;
}
-static INLINE int get_ref_frame_buf_idx(const VP10_COMP *const cpi,
+static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
MV_REFERENCE_FRAME ref_frame) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : INVALID_IDX;
}
static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
- VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+ AV1_COMMON *const cm = &cpi->common;
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
: NULL;
}
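/* The lookup above is a two-step indirection: get_ref_frame_map_idx()
 * maps a reference frame to a per-reference slot index (e.g.
 * cpi->alt_fb_idx), and cm->ref_frame_map[map_idx] then selects the
 * actual entry in cm->buffer_pool->frame_bufs. INVALID_IDX at either
 * step propagates out as INVALID_IDX or NULL. */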
static INLINE const YV12_BUFFER_CONFIG *get_upsampled_ref(
- VP10_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
+ AV1_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
// Use up-sampled reference frames.
const int buf_idx =
cpi->upsampled_ref_idx[get_ref_frame_map_idx(cpi, ref_frame)];
@@ -729,10 +729,9 @@
}
#if CONFIG_EXT_REFS
-static INLINE int enc_is_ref_frame_buf(VP10_COMP *cpi,
- RefCntBuffer *frame_buf) {
+static INLINE int enc_is_ref_frame_buf(AV1_COMP *cpi, RefCntBuffer *frame_buf) {
MV_REFERENCE_FRAME ref_frame;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
if (buf_idx == INVALID_IDX) continue;
@@ -760,32 +759,32 @@
return get_token_alloc(tile_mb_rows, tile_mb_cols);
}
-void vp10_alloc_compressor_data(VP10_COMP *cpi);
+void av1_alloc_compressor_data(AV1_COMP *cpi);
-void vp10_scale_references(VP10_COMP *cpi);
+void av1_scale_references(AV1_COMP *cpi);
-void vp10_update_reference_frames(VP10_COMP *cpi);
+void av1_update_reference_frames(AV1_COMP *cpi);
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv);
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled);
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled);
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled);
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled);
-void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags);
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags);
-static INLINE int is_altref_enabled(const VP10_COMP *const cpi) {
+static INLINE int is_altref_enabled(const AV1_COMP *const cpi) {
return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 &&
cpi->oxcf.enable_auto_arf;
}
// TODO(zoeliu): To set up cpi->oxcf.enable_auto_brf
#if 0 && CONFIG_EXT_REFS
-static INLINE int is_bwdref_enabled(const VP10_COMP *const cpi) {
+static INLINE int is_bwdref_enabled(const AV1_COMP *const cpi) {
// NOTE(zoeliu): The enabling of bi-predictive frames depends on the use of
// alt_ref, and is currently disabled when the alt_ref interval is
// not sufficiently large.
@@ -793,7 +792,7 @@
}
#endif // CONFIG_EXT_REFS
-static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void set_ref_ptrs(AV1_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
xd->block_refs[0] =
@@ -806,11 +805,11 @@
return frame_index & 0x1;
}
-static INLINE int *cond_cost_list(const struct VP10_COMP *cpi, int *cost_list) {
+static INLINE int *cond_cost_list(const struct AV1_COMP *cpi, int *cost_list) {
return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL;
}
-void vp10_new_framerate(VP10_COMP *cpi, double framerate);
+void av1_new_framerate(AV1_COMP *cpi, double framerate);
#define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl))
@@ -830,4 +829,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODER_H_
+#endif // AV1_ENCODER_ENCODER_H_
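As a quick orientation to the renamed API, here is a minimal sketch of the encode lifecycle these declarations imply. It assumes this header is included and that oxcf, pool and the raw frame sd have been set up elsewhere; error handling is omitted:

/* Assumes #include "av1/encoder/encoder.h" and valid inputs. */
static void encode_one_frame_sketch(AV1EncoderConfig *oxcf, BufferPool *pool,
                                    YV12_BUFFER_CONFIG *sd, int64_t pts,
                                    int64_t duration) {
  struct AV1_COMP *cpi = av1_create_compressor(oxcf, pool);
  static uint8_t dest[1 << 20]; /* placeholder output buffer */
  unsigned int flags = 0;
  size_t size = 0;
  int64_t time_stamp, time_end;

  av1_receive_raw_frame(cpi, flags, sd, pts, pts + duration);
  /* Returns 0 when a packet was produced; a negative value means no
   * output yet (e.g. the lookahead is still filling). */
  if (av1_get_compressed_data(cpi, &flags, &size, dest, &time_stamp,
                              &time_end, /*flush=*/0) == 0) {
    /* size bytes of compressed data are now in dest. */
  }
  av1_remove_compressor(cpi);
}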
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index 63d716c..d4c0a7a 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -11,7 +11,7 @@
#include "av1/encoder/encodeframe.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/ethread.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
int i, j, k, l, m, n;
@@ -34,8 +34,8 @@
}
static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
- VP10_COMP *const cpi = thread_data->cpi;
- const VP10_COMMON *const cm = &cpi->common;
+ AV1_COMP *const cpi = thread_data->cpi;
+ const AV1_COMMON *const cm = &cpi->common;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
int t;
@@ -47,31 +47,31 @@
int tile_row = t / tile_cols;
int tile_col = t % tile_cols;
- vp10_encode_tile(cpi, thread_data->td, tile_row, tile_col);
+ av1_encode_tile(cpi, thread_data->td, tile_row, tile_col);
}
return 0;
}
-void vp10_encode_tiles_mt(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_encode_tiles_mt(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const int tile_cols = cm->tile_cols;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
- const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
+ const int num_workers = AOMMIN(cpi->oxcf.max_threads, tile_cols);
int i;
- vp10_init_tile_data(cpi);
+ av1_init_tile_data(cpi);
// Only run once to create threads and allocate thread data.
if (cpi->num_workers == 0) {
CHECK_MEM_ERROR(cm, cpi->workers,
- vpx_malloc(num_workers * sizeof(*cpi->workers)));
+ aom_malloc(num_workers * sizeof(*cpi->workers)));
CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
- vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));
+ aom_calloc(num_workers, sizeof(*cpi->tile_thr_data)));
for (i = 0; i < num_workers; i++) {
- VPxWorker *const worker = &cpi->workers[i];
+ AVxWorker *const worker = &cpi->workers[i];
EncWorkerData *const thread_data = &cpi->tile_thr_data[i];
++cpi->num_workers;
@@ -82,25 +82,25 @@
if (i < num_workers - 1) {
// Allocate thread data.
CHECK_MEM_ERROR(cm, thread_data->td,
- vpx_memalign(32, sizeof(*thread_data->td)));
- vp10_zero(*thread_data->td);
+ aom_memalign(32, sizeof(*thread_data->td)));
+ av1_zero(*thread_data->td);
// Set up pc_tree.
thread_data->td->leaf_tree = NULL;
thread_data->td->pc_tree = NULL;
- vp10_setup_pc_tree(cm, thread_data->td);
+ av1_setup_pc_tree(cm, thread_data->td);
// Set up variance tree if needed.
if (cpi->sf.partition_search_type == VAR_BASED_PARTITION)
- vp10_setup_var_tree(cm, &cpi->td);
+ av1_setup_var_tree(cm, &cpi->td);
// Allocate frame counters in thread data.
CHECK_MEM_ERROR(cm, thread_data->td->counts,
- vpx_calloc(1, sizeof(*thread_data->td->counts)));
+ aom_calloc(1, sizeof(*thread_data->td->counts)));
// Create threads
if (!winterface->reset(worker))
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Tile encoder thread creation failed");
} else {
// Main thread acts as a worker and uses the thread data in cpi.
@@ -112,10 +112,10 @@
}
for (i = 0; i < num_workers; i++) {
- VPxWorker *const worker = &cpi->workers[i];
+ AVxWorker *const worker = &cpi->workers[i];
EncWorkerData *thread_data;
- worker->hook = (VPxWorkerHook)enc_worker_hook;
+ worker->hook = (AVxWorkerHook)enc_worker_hook;
worker->data1 = &cpi->tile_thr_data[i];
worker->data2 = NULL;
thread_data = (EncWorkerData *)worker->data1;
@@ -134,13 +134,13 @@
if (cpi->common.allow_screen_content_tools && i < num_workers - 1) {
MACROBLOCK *x = &thread_data->td->mb;
CHECK_MEM_ERROR(cm, x->palette_buffer,
- vpx_memalign(16, sizeof(*x->palette_buffer)));
+ aom_memalign(16, sizeof(*x->palette_buffer)));
}
}
// Encode a frame
for (i = 0; i < num_workers; i++) {
- VPxWorker *const worker = &cpi->workers[i];
+ AVxWorker *const worker = &cpi->workers[i];
EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
// Set the starting tile for each thread.
@@ -154,17 +154,17 @@
// Encoding ends.
for (i = 0; i < num_workers; i++) {
- VPxWorker *const worker = &cpi->workers[i];
+ AVxWorker *const worker = &cpi->workers[i];
winterface->sync(worker);
}
for (i = 0; i < num_workers; i++) {
- VPxWorker *const worker = &cpi->workers[i];
+ AVxWorker *const worker = &cpi->workers[i];
EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
// Accumulate counters.
if (i < cpi->num_workers - 1) {
- vp10_accumulate_frame_counts(cm, thread_data->td->counts);
+ av1_accumulate_frame_counts(cm, thread_data->td->counts);
accumulate_rd_opt(&cpi->td, thread_data->td);
}
}
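The control flow above is a common fan-out/join pattern: the last worker slot runs synchronously so the main thread encodes tiles too, and every worker is synced before counters are accumulated. A condensed sketch; launch() and execute() are assumed entry points of AVxWorkerInterface (only reset() and sync() appear in this file):

static int run_workers_sketch(AVxWorker *workers, int num_workers,
                              const AVxWorkerInterface *winterface) {
  int i, ok = 1;
  for (i = 0; i < num_workers; ++i) {
    if (i == num_workers - 1)
      winterface->execute(&workers[i]); /* main thread works in place */
    else
      winterface->launch(&workers[i]); /* hand off to a spawned thread */
  }
  for (i = 0; i < num_workers; ++i) /* join before touching shared counts */
    ok &= winterface->sync(&workers[i]);
  return ok;
}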
diff --git a/av1/encoder/ethread.h b/av1/encoder/ethread.h
index d72816c..161c68f 100644
--- a/av1/encoder/ethread.h
+++ b/av1/encoder/ethread.h
@@ -8,26 +8,26 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_ETHREAD_H_
-#define VP10_ENCODER_ETHREAD_H_
+#ifndef AV1_ENCODER_ETHREAD_H_
+#define AV1_ENCODER_ETHREAD_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10_COMP;
+struct AV1_COMP;
struct ThreadData;
typedef struct EncWorkerData {
- struct VP10_COMP *cpi;
+ struct AV1_COMP *cpi;
struct ThreadData *td;
int start;
} EncWorkerData;
-void vp10_encode_tiles_mt(struct VP10_COMP *cpi);
+void av1_encode_tiles_mt(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ETHREAD_H_
+#endif // AV1_ENCODER_ETHREAD_H_
diff --git a/av1/encoder/extend.c b/av1/encoder/extend.c
index 1b0c442..13e529b 100644
--- a/av1/encoder/extend.c
+++ b/av1/encoder/extend.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/common.h"
@@ -56,7 +56,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
uint8_t *dst8, int dst_pitch, int w,
int h, int extend_top, int extend_left,
@@ -72,9 +72,9 @@
uint16_t *dst_ptr2 = dst + w;
for (i = 0; i < h; i++) {
- vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
+ aom_memset16(dst_ptr1, src_ptr1[0], extend_left);
memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(src_ptr1[0]));
- vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
+ aom_memset16(dst_ptr2, src_ptr2[0], extend_right);
src_ptr1 += src_pitch;
src_ptr2 += src_pitch;
dst_ptr1 += dst_pitch;
@@ -99,10 +99,10 @@
dst_ptr2 += dst_pitch;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst) {
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst) {
// Extend src frame in buffer
// Altref filtering assumes a 16-pixel extension
const int et_y = 16;
@@ -111,10 +111,10 @@
// to 64x64, so the right and bottom need to be extended to a multiple of 64
// or by at least 16 pixels, whichever is greater.
const int er_y =
- VPXMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) -
+ AOMMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) -
src->y_crop_width;
const int eb_y =
- VPXMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) -
+ AOMMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) -
src->y_crop_height;
const int uv_width_subsampling = (src->uv_width != src->y_width);
const int uv_height_subsampling = (src->uv_height != src->y_height);
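A quick worked check of the right-extension arithmetic above, with illustrative sizes: for y_width = y_crop_width = 1280, ALIGN_POWER_OF_TWO(1280, 6) = 1280 and AOMMAX(1280 + 16, 1280) = 1296, so er_y = 16, the minimum extension. For y_width = y_crop_width = 720, the alignment term dominates: AOMMAX(720 + 16, 768) = 768, giving er_y = 48 and padding each row out to the next 64-pixel boundary, as the comment requires.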
@@ -123,7 +123,7 @@
const int eb_uv = eb_y >> uv_height_subsampling;
const int er_uv = er_y >> uv_width_subsampling;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
dst->y_stride, src->y_crop_width,
@@ -138,7 +138,7 @@
src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
dst->y_stride, src->y_crop_width, src->y_crop_height,
@@ -153,9 +153,9 @@
et_uv, el_uv, eb_uv, er_uv);
}
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst, int srcy,
- int srcx, int srch, int srcw) {
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw) {
// If the side is not touching the border then don't extend.
const int et_y = srcy ? 0 : dst->border;
const int el_y = srcx ? 0 : dst->border;
diff --git a/av1/encoder/extend.h b/av1/encoder/extend.h
index 1ad763e..2c436de 100644
--- a/av1/encoder/extend.h
+++ b/av1/encoder/extend.h
@@ -8,24 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_EXTEND_H_
-#define VP10_ENCODER_EXTEND_H_
+#ifndef AV1_ENCODER_EXTEND_H_
+#define AV1_ENCODER_EXTEND_H_
#include "aom_scale/yv12config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst);
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst);
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst, int srcy,
- int srcx, int srch, int srcw);
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_EXTEND_H_
+#endif // AV1_ENCODER_EXTEND_H_
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
index b23b839..61a799c 100644
--- a/av1/encoder/firstpass.c
+++ b/av1/encoder/firstpass.c
@@ -12,19 +12,19 @@
#include <math.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_scale/aom_scale.h"
#include "aom_scale/yv12config.h"
#include "av1/common/entropymv.h"
#include "av1/common/quant_common.h"
-#include "av1/common/reconinter.h" // vp10_setup_dst_planes()
+#include "av1/common/reconinter.h" // av1_setup_dst_planes()
#include "av1/encoder/aq_variance.h"
#include "av1/encoder/block.h"
#include "av1/encoder/encodeframe.h"
@@ -95,12 +95,12 @@
}
static void output_stats(FIRSTPASS_STATS *stats,
- struct vpx_codec_pkt_list *pktlist) {
- struct vpx_codec_cx_pkt pkt;
- pkt.kind = VPX_CODEC_STATS_PKT;
+ struct aom_codec_pkt_list *pktlist) {
+ struct aom_codec_cx_pkt pkt;
+ pkt.kind = AOM_CODEC_STATS_PKT;
pkt.data.twopass_stats.buf = stats;
pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
- vpx_codec_pkt_list_add(pktlist, &pkt);
+ aom_codec_pkt_list_add(pktlist, &pkt);
// TEMP debug code
#if OUTPUT_FPF
@@ -125,13 +125,13 @@
}
#if CONFIG_FP_MB_STATS
-static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
- struct vpx_codec_pkt_list *pktlist) {
- struct vpx_codec_cx_pkt pkt;
- pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, AV1_COMMON *cm,
+ struct aom_codec_pkt_list *pktlist) {
+ struct aom_codec_cx_pkt pkt;
+ pkt.kind = AOM_CODEC_FPMB_STATS_PKT;
pkt.data.firstpass_mb_stats.buf = this_frame_mb_stats;
pkt.data.firstpass_mb_stats.sz = cm->initial_mbs * sizeof(uint8_t);
- vpx_codec_pkt_list_add(pktlist, &pkt);
+ aom_codec_pkt_list_add(pktlist, &pkt);
}
#endif
@@ -214,7 +214,7 @@
// Calculate the linear size relative to a baseline of 1080P
#define BASE_SIZE 2073600.0 // 1920x1080
-static double get_linear_size_factor(const VP10_COMP *cpi) {
+static double get_linear_size_factor(const AV1_COMP *cpi) {
const double this_area = cpi->initial_width * cpi->initial_height;
return pow(this_area / BASE_SIZE, 0.5);
}
@@ -223,7 +223,7 @@
// bars and partially discounts other 0 energy areas.
#define MIN_ACTIVE_AREA 0.5
#define MAX_ACTIVE_AREA 1.0
-static double calculate_active_area(const VP10_COMP *cpi,
+static double calculate_active_area(const AV1_COMP *cpi,
const FIRSTPASS_STATS *this_frame) {
double active_pct;
@@ -237,9 +237,9 @@
// Calculate a modified error used in distributing bits between easier and
// harder frames.
#define ACT_AREA_CORRECTION 0.5
-static double calculate_modified_err(const VP10_COMP *cpi,
+static double calculate_modified_err(const AV1_COMP *cpi,
const TWO_PASS *twopass,
- const VP10EncoderConfig *oxcf,
+ const AV1EncoderConfig *oxcf,
const FIRSTPASS_STATS *this_frame) {
const FIRSTPASS_STATS *const stats = &twopass->total_stats;
const double av_weight = stats->weight / stats->count;
@@ -263,7 +263,7 @@
// This function returns the maximum target rate per frame.
static int frame_max_bits(const RATE_CONTROL *rc,
- const VP10EncoderConfig *oxcf) {
+ const AV1EncoderConfig *oxcf) {
int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
(int64_t)oxcf->two_pass_vbrmax_section) /
100;
@@ -275,20 +275,20 @@
return (int)max_bits;
}
-void vp10_init_first_pass(VP10_COMP *cpi) {
+void av1_init_first_pass(AV1_COMP *cpi) {
zero_stats(&cpi->twopass.total_stats);
}
-void vp10_end_first_pass(VP10_COMP *cpi) {
+void av1_end_first_pass(AV1_COMP *cpi) {
output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
}
-static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
+static aom_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
switch (bsize) {
- case BLOCK_8X8: return vpx_mse8x8;
- case BLOCK_16X8: return vpx_mse16x8;
- case BLOCK_8X16: return vpx_mse8x16;
- default: return vpx_mse16x16;
+ case BLOCK_8X8: return aom_mse8x8;
+ case BLOCK_16X8: return aom_mse16x8;
+ case BLOCK_8X16: return aom_mse8x16;
+ default: return aom_mse16x16;
}
}
@@ -296,37 +296,37 @@
const struct buf_2d *src,
const struct buf_2d *ref) {
unsigned int sse;
- const vpx_variance_fn_t fn = get_block_variance_fn(bsize);
+ const aom_variance_fn_t fn = get_block_variance_fn(bsize);
fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
return sse;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-static vpx_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
+#if CONFIG_AOM_HIGHBITDEPTH
+static aom_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
int bd) {
switch (bd) {
default:
switch (bsize) {
- case BLOCK_8X8: return vpx_highbd_8_mse8x8;
- case BLOCK_16X8: return vpx_highbd_8_mse16x8;
- case BLOCK_8X16: return vpx_highbd_8_mse8x16;
- default: return vpx_highbd_8_mse16x16;
+ case BLOCK_8X8: return aom_highbd_8_mse8x8;
+ case BLOCK_16X8: return aom_highbd_8_mse16x8;
+ case BLOCK_8X16: return aom_highbd_8_mse8x16;
+ default: return aom_highbd_8_mse16x16;
}
break;
case 10:
switch (bsize) {
- case BLOCK_8X8: return vpx_highbd_10_mse8x8;
- case BLOCK_16X8: return vpx_highbd_10_mse16x8;
- case BLOCK_8X16: return vpx_highbd_10_mse8x16;
- default: return vpx_highbd_10_mse16x16;
+ case BLOCK_8X8: return aom_highbd_10_mse8x8;
+ case BLOCK_16X8: return aom_highbd_10_mse16x8;
+ case BLOCK_8X16: return aom_highbd_10_mse8x16;
+ default: return aom_highbd_10_mse16x16;
}
break;
case 12:
switch (bsize) {
- case BLOCK_8X8: return vpx_highbd_12_mse8x8;
- case BLOCK_16X8: return vpx_highbd_12_mse16x8;
- case BLOCK_8X16: return vpx_highbd_12_mse8x16;
- default: return vpx_highbd_12_mse16x16;
+ case BLOCK_8X8: return aom_highbd_12_mse8x8;
+ case BLOCK_16X8: return aom_highbd_12_mse16x8;
+ case BLOCK_8X16: return aom_highbd_12_mse8x16;
+ default: return aom_highbd_12_mse16x16;
}
break;
}
@@ -337,23 +337,23 @@
const struct buf_2d *ref,
int bd) {
unsigned int sse;
- const vpx_variance_fn_t fn = highbd_get_block_variance_fn(bsize, bd);
+ const aom_variance_fn_t fn = highbd_get_block_variance_fn(bsize, bd);
fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
return sse;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Refine the motion search range according to the frame dimensions
// for the first-pass test.
-static int get_search_range(const VP10_COMP *cpi) {
+static int get_search_range(const AV1_COMP *cpi) {
int sr = 0;
- const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
+ const int dim = AOMMIN(cpi->initial_width, cpi->initial_height);
while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr;
return sr;
}
-static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void first_pass_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
const MV *ref_mv, MV *best_mv,
int *best_motion_err) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -361,7 +361,7 @@
MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 };
int num00, tmp_err, n;
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
- vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
+ aom_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
const int new_mv_mode_penalty = NEW_MV_MODE_PENALTY;
int step_param = 3;
@@ -372,18 +372,18 @@
// Override the default variance function to use MSE.
v_fn_ptr.vf = get_block_variance_fn(bsize);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
v_fn_ptr.vf = highbd_get_block_variance_fn(bsize, xd->bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Center the initial step/diamond search on best mv.
tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
step_param, x->sadperbit16, &num00,
&v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
- tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+ tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
if (tmp_err < *best_motion_err) {
@@ -405,7 +405,7 @@
step_param + n, x->sadperbit16, &num00,
&v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
- tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+ tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
if (tmp_err < INT_MAX - new_mv_mode_penalty)
tmp_err += new_mv_mode_penalty;
@@ -417,7 +417,7 @@
}
}
-static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
+static BLOCK_SIZE get_bsize(const AV1_COMMON *cm, int mb_row, int mb_col) {
if (2 * mb_col + 1 < cm->mi_cols) {
return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
} else {
@@ -425,19 +425,19 @@
}
}
-static int find_fp_qindex(vpx_bit_depth_t bit_depth) {
+static int find_fp_qindex(aom_bit_depth_t bit_depth) {
int i;
for (i = 0; i < QINDEX_RANGE; ++i)
- if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
+ if (av1_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
if (i == QINDEX_RANGE) i--;
return i;
}
-static void set_first_pass_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_first_pass_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
if (!cpi->refresh_alt_ref_frame &&
(cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
cm->frame_type = KEY_FRAME;
@@ -450,10 +450,10 @@
#define UL_INTRA_THRESH 50
#define INVALID_ROW -1
-void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
+void av1_first_pass(AV1_COMP *cpi, const struct lookahead_entry *source) {
int mb_row, mb_col;
MACROBLOCK *const x = &cpi->td.mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
TileInfo tile;
struct macroblock_plane *const p = x->plane;
@@ -498,32 +498,32 @@
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
- vp10_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
+ av1_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
}
#endif
- vpx_clear_system_state();
+ aom_clear_system_state();
intra_factor = 0.0;
brightness_factor = 0.0;
neutral_count = 0.0;
set_first_pass_params(cpi);
- vp10_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
+ av1_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
- vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
- vp10_setup_src_planes(x, cpi->Source, 0, 0);
- vp10_setup_dst_planes(xd->plane, new_yv12, 0, 0);
+ av1_setup_src_planes(x, cpi->Source, 0, 0);
+ av1_setup_dst_planes(xd->plane, new_yv12, 0, 0);
if (!frame_is_intra_only(cm)) {
- vp10_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
+ av1_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
}
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
- vp10_frame_init_quantizer(cpi);
+ av1_frame_init_quantizer(cpi);
for (i = 0; i < MAX_MB_PLANE; ++i) {
p[i].coeff = ctx->coeff[i][1];
@@ -532,11 +532,11 @@
p[i].eobs = ctx->eobs[i][1];
}
- vp10_init_mv_probs(cm);
- vp10_initialize_rd_consts(cpi);
+ av1_init_mv_probs(cm);
+ av1_initialize_rd_consts(cpi);
// Tiling is ignored in the first pass.
- vp10_tile_init(&tile, cm, 0, 0);
+ av1_tile_init(&tile, cm, 0, 0);
recon_y_stride = new_yv12->y_stride;
recon_uv_stride = new_yv12->uv_stride;
@@ -566,7 +566,7 @@
const int mb_index = mb_row * cm->mb_cols + mb_col;
#endif
- vpx_clear_system_state();
+ aom_clear_system_state();
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
@@ -586,8 +586,8 @@
xd->mi[0]->mbmi.mode = DC_PRED;
xd->mi[0]->mbmi.tx_size =
use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
- vp10_encode_intra_block_plane(x, bsize, 0, 0);
- this_error = vpx_get_mb_ss(x->plane[0].src_diff);
+ av1_encode_intra_block_plane(x, bsize, 0, 0);
+ this_error = aom_get_mb_ss(x->plane[0].src_diff);
// Keep a record of blocks that have almost no intra error residual
// (i.e. are in effect completely flat and untextured in the intra
@@ -600,29 +600,29 @@
image_data_start_row = mb_row;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8: break;
- case VPX_BITS_10: this_error >>= 4; break;
- case VPX_BITS_12: this_error >>= 8; break;
+ case AOM_BITS_8: break;
+ case AOM_BITS_10: this_error >>= 4; break;
+ case AOM_BITS_12: this_error >>= 8; break;
default:
assert(0 &&
- "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ "cm->bit_depth should be AOM_BITS_8, "
+ "AOM_BITS_10 or AOM_BITS_12");
return;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
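/* Why the shifts above: this_error is a sum of squared differences, and
 * squared sample differences scale by 2^(2 * (bit_depth - 8)) relative
 * to 8-bit input -- hence >> 4 for 10-bit and >> 8 for 12-bit, which
 * keeps first-pass error statistics comparable across bit depths. */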
- vpx_clear_system_state();
+ aom_clear_system_state();
log_intra = log(this_error + 1.0);
if (log_intra < 10.0)
intra_factor += 1.0 + ((10.0 - log_intra) * 0.05);
else
intra_factor += 1.0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth)
level_sample = CONVERT_TO_SHORTPTR(x->plane[0].src.buf)[0];
else
@@ -667,7 +667,7 @@
struct buf_2d unscaled_last_source_buf_2d;
xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -678,7 +678,7 @@
#else
motion_error =
get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Compute the motion error of the 0,0 motion using the last source
// frame as the reference. Skip the further motion search on
@@ -687,7 +687,7 @@
cpi->unscaled_last_source->y_buffer + recon_yoffset;
unscaled_last_source_buf_2d.stride =
cpi->unscaled_last_source->y_stride;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
raw_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
@@ -698,7 +698,7 @@
#else
raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
&unscaled_last_source_buf_2d);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// TODO(pengchong): Replace the hard-coded threshold
if (raw_motion_error > 25) {
@@ -724,7 +724,7 @@
int gf_motion_error;
xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
gf_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -735,7 +735,7 @@
#else
gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
&xd->plane[0].pre[0]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
&gf_motion_error);
@@ -782,7 +782,7 @@
#endif
if (motion_error <= this_error) {
- vpx_clear_system_state();
+ aom_clear_system_state();
// Keep a count of cases where the inter and intra were very close
// and very low. This helps with scene cut detection for example in
@@ -806,8 +806,8 @@
xd->mi[0]->mbmi.tx_size = TX_4X4;
xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
xd->mi[0]->mbmi.ref_frame[1] = NONE;
- vp10_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
- vp10_encode_sby_pass1(x, bsize);
+ av1_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
+ av1_encode_sby_pass1(x, bsize);
sum_mvr += mv.row;
sum_mvr_abs += abs(mv.row);
sum_mvc += mv.col;
@@ -916,7 +916,7 @@
x->plane[2].src.buf +=
uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
- vpx_clear_system_state();
+ aom_clear_system_state();
}
// Clamp the image start to rows/2. This number of rows is discarded top
@@ -928,7 +928,7 @@
// Exclude any image dead zone
if (image_data_start_row > 0) {
intra_skip_count =
- VPXMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2));
+ AOMMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2));
}
{
@@ -1021,7 +1021,7 @@
++twopass->sr_update_lag;
}
- vpx_extend_frame_borders(new_yv12);
+ aom_extend_frame_borders(new_yv12);
// The frame we just compressed now becomes the last frame.
#if CONFIG_EXT_REFS
@@ -1066,12 +1066,12 @@
static double calc_correction_factor(double err_per_mb, double err_divisor,
double pt_low, double pt_high, int q,
- vpx_bit_depth_t bit_depth) {
+ aom_bit_depth_t bit_depth) {
const double error_term = err_per_mb / err_divisor;
// Adjust the power term based on the actual quantizer.
const double power_term =
- VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
+ AOMMIN(av1_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
// Calculate correction factor.
if (power_term < 1.0) assert(error_term >= 0.0);
@@ -1080,13 +1080,13 @@
}
#define ERR_DIVISOR 100.0
-static int get_twopass_worst_quality(const VP10_COMP *cpi,
+static int get_twopass_worst_quality(const AV1_COMP *cpi,
const double section_err,
double inactive_zone,
int section_target_bandwidth,
double group_weight_factor) {
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
inactive_zone = fclamp(inactive_zone, 0.0, 1.0);
@@ -1096,7 +1096,7 @@
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
? cpi->initial_mbs
: cpi->common.MBs;
- const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
+ const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
const double av_err_per_mb = section_err / active_mbs;
const double speed_term = 1.0 + 0.04 * oxcf->speed;
double ediv_size_correction;
@@ -1110,7 +1110,7 @@
// motion vectors. Some account of this is made through adjustment of
// the error divisor.
ediv_size_correction =
- VPXMAX(0.2, VPXMIN(5.0, get_linear_size_factor(cpi)));
+ AOMMAX(0.2, AOMMIN(5.0, get_linear_size_factor(cpi)));
if (ediv_size_correction < 1.0)
ediv_size_correction = -(1.0 / ediv_size_correction);
ediv_size_correction *= 4.0;
@@ -1121,29 +1121,29 @@
const double factor = calc_correction_factor(
av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
FACTOR_PT_HIGH, q, cpi->common.bit_depth);
- const int bits_per_mb = vp10_rc_bits_per_mb(
+ const int bits_per_mb = av1_rc_bits_per_mb(
INTER_FRAME, q, factor * speed_term * group_weight_factor,
cpi->common.bit_depth);
if (bits_per_mb <= target_norm_bits_per_mb) break;
}
// Restriction on active max q for constrained quality mode.
- if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level);
+ if (cpi->oxcf.rc_mode == AOM_CQ) q = AOMMAX(q, oxcf->cq_level);
return q;
}
}
-static void setup_rf_level_maxq(VP10_COMP *cpi) {
+static void setup_rf_level_maxq(AV1_COMP *cpi) {
int i;
RATE_CONTROL *const rc = &cpi->rc;
for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) {
- int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality);
- rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality);
+ int qdelta = av1_frame_type_qdelta(cpi, i, rc->worst_quality);
+ rc->rf_level_maxq[i] = AOMMAX(rc->worst_quality + qdelta, rc->best_quality);
}
}
-void vp10_init_subsampling(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_init_subsampling(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
const int w = cm->width;
const int h = cm->height;
@@ -1158,15 +1158,15 @@
setup_rf_level_maxq(cpi);
}
-void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
- int *scaled_frame_height) {
+void av1_calculate_coded_size(AV1_COMP *cpi, int *scaled_frame_width,
+ int *scaled_frame_height) {
RATE_CONTROL *const rc = &cpi->rc;
*scaled_frame_width = rc->frame_width[rc->frame_size_selector];
*scaled_frame_height = rc->frame_height[rc->frame_size_selector];
}
-void vp10_init_second_pass(VP10_COMP *cpi) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_init_second_pass(AV1_COMP *cpi) {
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
TWO_PASS *const twopass = &cpi->twopass;
double frame_rate;
FIRSTPASS_STATS *stats;
@@ -1187,7 +1187,7 @@
// encoded in the second pass is a guess. However, the sum duration is not.
// It is calculated based on the actual durations of all frames from the
// first pass.
- vp10_new_framerate(cpi, frame_rate);
+ av1_new_framerate(cpi, frame_rate);
twopass->bits_left =
(int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
@@ -1223,7 +1223,7 @@
twopass->last_kfgroup_zeromotion_pct = 100;
if (oxcf->resize_mode != RESIZE_NONE) {
- vp10_init_subsampling(cpi);
+ av1_init_subsampling(cpi);
}
}
@@ -1234,7 +1234,7 @@
#define LOW_SR_DIFF_THRESH 0.1
#define SR_DIFF_MAX 128.0
-static double get_sr_decay_rate(const VP10_COMP *cpi,
+static double get_sr_decay_rate(const AV1_COMP *cpi,
const FIRSTPASS_STATS *frame) {
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
: cpi->common.MBs;
@@ -1253,40 +1253,40 @@
modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
if (sr_diff > LOW_SR_DIFF_THRESH) {
- sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX);
+ sr_diff = AOMMIN(sr_diff, SR_DIFF_MAX);
sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) -
(MOTION_AMP_PART * motion_amplitude_factor) -
(INTRA_PART * modified_pcnt_intra);
}
- return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter));
+ return AOMMAX(sr_decay, AOMMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter));
}
// This function gives an estimate of how badly we believe the prediction
// quality is decaying from frame to frame.
-static double get_zero_motion_factor(const VP10_COMP *cpi,
+static double get_zero_motion_factor(const AV1_COMP *cpi,
const FIRSTPASS_STATS *frame) {
const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
double sr_decay = get_sr_decay_rate(cpi, frame);
- return VPXMIN(sr_decay, zero_motion_pct);
+ return AOMMIN(sr_decay, zero_motion_pct);
}
#define ZM_POWER_FACTOR 0.75
-static double get_prediction_decay_rate(const VP10_COMP *cpi,
+static double get_prediction_decay_rate(const AV1_COMP *cpi,
const FIRSTPASS_STATS *next_frame) {
const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
const double zero_motion_factor =
(0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
ZM_POWER_FACTOR));
- return VPXMAX(zero_motion_factor,
+ return AOMMAX(zero_motion_factor,
(sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
}
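
A worked sketch of the decay blend above (illustration only; blend_decay is a hypothetical name). With pcnt_inter = 0.9 and pcnt_motion = 0.1, the zero-motion factor is 0.95 * 0.8^0.75, about 0.80, and the blend can only raise the second-reference decay toward 1.0, never lower it:

#include <math.h>

// Mirrors get_prediction_decay_rate: the zero-motion share of the frame,
// raised to ZM_POWER_FACTOR (0.75) and scaled by 0.95, floors the blended
// decay. Since blended - zmf = sr * (1 - zmf) >= 0 for sr in [0, 1], the
// outer max is a safety net rather than the common path.
static double blend_decay(double sr_decay_rate, double pcnt_inter,
                          double pcnt_motion) {
  const double zmf = 0.95 * pow(pcnt_inter - pcnt_motion, 0.75);
  const double blended = sr_decay_rate + (1.0 - sr_decay_rate) * zmf;
  return (zmf > blended) ? zmf : blended;
}
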
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+static int detect_transition_to_still(AV1_COMP *cpi, int frame_interval,
int still_interval,
double loop_decay_rate,
double last_decay_rate) {
@@ -1360,18 +1360,17 @@
}
#define BASELINE_ERR_PER_MB 1000.0
-static double calc_frame_boost(VP10_COMP *cpi,
- const FIRSTPASS_STATS *this_frame,
+static double calc_frame_boost(AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame,
double this_frame_mv_in_out, double max_boost) {
double frame_boost;
- const double lq = vp10_convert_qindex_to_q(
+ const double lq = av1_convert_qindex_to_q(
cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
- const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
+ const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
: cpi->common.MBs;
// Correct for any inactive region in the image
- num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
+ num_mbs = (int)AOMMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
// Underlying boost factor is based on inter error ratio.
frame_boost = (BASELINE_ERR_PER_MB * num_mbs) /
@@ -1387,11 +1386,11 @@
else
frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
- return VPXMIN(frame_boost, max_boost * boost_q_correction);
+ return AOMMIN(frame_boost, max_boost * boost_q_correction);
}
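
The Q-dependent boost cap in calc_frame_boost above is linear in the average inter-frame Q and saturates early; a minimal sketch (boost_q_correction_of is a hypothetical name):

// min(0.5 + lq * 0.015, 1.5): at lq = 40 the multiplier is 1.1; it reaches
// the 1.5 cap at lq of about 66.7, so high-Q clips get the full headroom.
static double boost_q_correction_of(double lq) {
  const double c = 0.5 + lq * 0.015;
  return (c < 1.5) ? c : 1.5;
}
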
-static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
- int b_frames, int *f_boost, int *b_boost) {
+static int calc_arf_boost(AV1_COMP *cpi, int offset, int f_frames, int b_frames,
+ int *f_boost, int *b_boost) {
TWO_PASS *const twopass = &cpi->twopass;
int i;
double boost_score = 0.0;
@@ -1473,7 +1472,7 @@
arf_boost = (*f_boost + *b_boost);
if (arf_boost < ((b_frames + f_frames) * 20))
arf_boost = ((b_frames + f_frames) * 20);
- arf_boost = VPXMAX(arf_boost, MIN_ARF_GF_BOOST);
+ arf_boost = AOMMAX(arf_boost, MIN_ARF_GF_BOOST);
return arf_boost;
}
@@ -1498,7 +1497,7 @@
}
// Calculate the total bits to allocate in this GF/ARF group.
-static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
+static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
double gf_group_err) {
const RATE_CONTROL *const rc = &cpi->rc;
const TWO_PASS *const twopass = &cpi->twopass;
@@ -1544,7 +1543,7 @@
}
// Calculate the number of extra bits for use in the boosted frame or frames.
- return VPXMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
+ return AOMMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
0);
}
@@ -1562,10 +1561,10 @@
}
#endif
-static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
+static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
double group_error, int gf_arf_bits) {
RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
FIRSTPASS_STATS frame_stats;
@@ -1606,7 +1605,7 @@
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
- vp10_zero_array(ext_arf_boost, MAX_EXT_ARFS);
+ av1_zero_array(ext_arf_boost, MAX_EXT_ARFS);
#endif
key_frame = cpi->common.frame_type == KEY_FRAME;
@@ -1761,7 +1760,7 @@
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
#endif // CONFIG_EXT_REFS
target_frame_size =
- clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits));
+ clamp(target_frame_size, 0, AOMMIN(max_bits, (int)total_group_bits));
#if CONFIG_EXT_REFS
// If we are going to have ARFs, check if we can have BWDREF in this
@@ -1862,7 +1861,7 @@
// Note:
// We need to configure the frame at the end of the sequence + 1 that will be
// the start frame for the next group. Otherwise, prior to the call to
-// vp10_rc_get_second_pass_params() the data will be undefined.
+// av1_rc_get_second_pass_params() the data will be undefined.
#if CONFIG_EXT_REFS
gf_group->arf_update_idx[frame_index] = 0;
gf_group->arf_ref_idx[frame_index] = 0;
@@ -1908,10 +1907,10 @@
cpi->multi_arf_last_grp_enabled = cpi->multi_arf_enabled;
}
// Analyse and define a gf/arf group.
-static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
- VP10_COMMON *const cm = &cpi->common;
+static void define_gf_group(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
- VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1EncoderConfig *const oxcf = &cpi->oxcf;
TWO_PASS *const twopass = &cpi->twopass;
FIRSTPASS_STATS next_frame;
const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
@@ -1955,11 +1954,11 @@
// Reset the GF group data structures unless this is a key
// frame in which case it will already have been done.
if (is_key_frame == 0) {
- vp10_zero(twopass->gf_group);
+ av1_zero(twopass->gf_group);
}
- vpx_clear_system_state();
- vp10_zero(next_frame);
+ aom_clear_system_state();
+ av1_zero(next_frame);
// Load stats for the current frame.
mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
@@ -1986,12 +1985,12 @@
// Set a maximum and minimum interval for the GF group.
// If the image appears almost completely static we can extend beyond this.
{
- int int_max_q = (int)(vp10_convert_qindex_to_q(
- twopass->active_worst_quality, cpi->common.bit_depth));
- int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
- cpi->common.bit_depth));
+ int int_max_q = (int)(av1_convert_qindex_to_q(twopass->active_worst_quality,
+ cpi->common.bit_depth));
+ int int_lbq = (int)(av1_convert_qindex_to_q(rc->last_boosted_qindex,
+ cpi->common.bit_depth));
- active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
+ active_min_gf_interval = rc->min_gf_interval + AOMMIN(2, int_max_q / 200);
if (active_min_gf_interval > rc->max_gf_interval)
active_min_gf_interval = rc->max_gf_interval;
@@ -2002,7 +2001,7 @@
// bits to spare and are better with a smaller interval and smaller boost.
// At high Q when there are few bits to spare we are better with a longer
// interval to spread the cost of the GF.
- active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6));
+ active_max_gf_interval = 12 + AOMMIN(4, (int_lbq / 6));
// We have: active_min_gf_interval <= rc->max_gf_interval
if (active_max_gf_interval < active_min_gf_interval)
@@ -2044,7 +2043,7 @@
decay_accumulator = decay_accumulator * loop_decay_rate;
// Monitor for static sections.
- zero_motion_accumulator = VPXMIN(
+ zero_motion_accumulator = AOMMIN(
zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
// Break clause to detect very still sections after motion. For example,
@@ -2102,7 +2101,7 @@
? 1
: 0;
} else {
- rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST);
+ rc->gfu_boost = AOMMAX((int)boost_score, MIN_ARF_GF_BOOST);
rc->source_alt_ref_pending = 0;
}
@@ -2137,7 +2136,7 @@
// where there could be significant overshoot than for easier
// sections where we do not wish to risk creating an overshoot
// of the allocated bit budget.
- if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
+ if ((cpi->oxcf.rc_mode != AOM_Q) && (rc->baseline_gf_interval > 1)) {
const int vbr_group_bits_per_frame =
(int)(gf_group_bits / rc->baseline_gf_interval);
const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
@@ -2151,17 +2150,17 @@
// rc factor is a weight factor that corrects for local rate control drift.
double rc_factor = 1.0;
if (rc->rate_error_estimate > 0) {
- rc_factor = VPXMAX(RC_FACTOR_MIN,
+ rc_factor = AOMMAX(RC_FACTOR_MIN,
(double)(100 - rc->rate_error_estimate) / 100.0);
} else {
- rc_factor = VPXMIN(RC_FACTOR_MAX,
+ rc_factor = AOMMIN(RC_FACTOR_MAX,
(double)(100 - rc->rate_error_estimate) / 100.0);
}
tmp_q = get_twopass_worst_quality(
cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor);
twopass->active_worst_quality =
- VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
+ AOMMAX(tmp_q, twopass->active_worst_quality >> 1);
}
#endif
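
The drift weight in the hunk above maps the percentage rate error into a multiplicative factor; a standalone sketch, with the RC_FACTOR_MIN/RC_FACTOR_MAX bounds (whose values are defined outside this hunk) passed in as parameters:

// rate_error_estimate is a percentage: positive means undershoot so far,
// negative means overshoot. (100 - err) / 100 is below 1.0 for undershoot
// (floored at rc_min) and above 1.0 for overshoot (capped at rc_max),
// tilting the worst-quality estimate against the direction of the drift.
static double rc_drift_factor(int rate_error_estimate, double rc_min,
                              double rc_max) {
  const double f = (double)(100 - rate_error_estimate) / 100.0;
  if (rate_error_estimate > 0) return (f < rc_min) ? rc_min : f;
  return (f > rc_max) ? rc_max : f;
}
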
@@ -2315,12 +2314,12 @@
#define FRAMES_TO_CHECK_DECAY 8
-static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int i, j;
RATE_CONTROL *const rc = &cpi->rc;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const FIRSTPASS_STATS first_frame = *this_frame;
const FIRSTPASS_STATS *const start_position = twopass->stats_in;
FIRSTPASS_STATS next_frame;
@@ -2335,12 +2334,12 @@
double kf_group_err = 0.0;
double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
- vp10_zero(next_frame);
+ av1_zero(next_frame);
cpi->common.frame_type = KEY_FRAME;
// Reset the GF group data structures.
- vp10_zero(*gf_group);
+ av1_zero(*gf_group);
// Is this a forced key frame by interval?
rc->this_key_frame_forced = rc->next_key_frame_forced;
@@ -2465,7 +2464,7 @@
} else {
twopass->kf_group_bits = 0;
}
- twopass->kf_group_bits = VPXMAX(0, twopass->kf_group_bits);
+ twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);
// Reset the first pass file position.
reset_fpf_position(twopass, start_position);
@@ -2478,7 +2477,7 @@
if (EOF == input_stats(twopass, &next_frame)) break;
// Monitor for static sections.
- zero_motion_accumulator = VPXMIN(zero_motion_accumulator,
+ zero_motion_accumulator = AOMMIN(zero_motion_accumulator,
get_zero_motion_factor(cpi, &next_frame));
// Not all frames in the group are necessarily used in calculating boost.
@@ -2492,7 +2491,7 @@
const double loop_decay_rate =
get_prediction_decay_rate(cpi, &next_frame);
decay_accumulator *= loop_decay_rate;
- decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR);
+ decay_accumulator = AOMMAX(decay_accumulator, MIN_DECAY_FACTOR);
av_decay_accumulator += decay_accumulator;
++loop_decay_counter;
}
@@ -2512,8 +2511,8 @@
// Apply various clamps for min and max boost
rc->kf_boost = (int)(av_decay_accumulator * boost_score);
- rc->kf_boost = VPXMAX(rc->kf_boost, (rc->frames_to_key * 3));
- rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST);
+ rc->kf_boost = AOMMAX(rc->kf_boost, (rc->frames_to_key * 3));
+ rc->kf_boost = AOMMAX(rc->kf_boost, MIN_KF_BOOST);
// Work out how many bits to allocate for the key frame itself.
kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost,
@@ -2551,7 +2550,7 @@
}
// Define the reference buffers that will be updated post encode.
-static void configure_buffer_updates(VP10_COMP *cpi) {
+static void configure_buffer_updates(AV1_COMP *cpi) {
TWO_PASS *const twopass = &cpi->twopass;
// Wei-Ting: Should we define another function to take care of
@@ -2634,7 +2633,7 @@
// Allow BRF use the farthest ALT_REF (ALT0) as BWD_REF by swapping
// the virtual indices.
// NOTE: The indices will be swapped back after this frame is encoded
- // (in vp10_update_reference_frames()).
+ // (in av1_update_reference_frames()).
int tmp = cpi->bwd_fb_idx;
cpi->bwd_fb_idx = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->arf_map[0];
@@ -2671,7 +2670,7 @@
}
}
-static int is_skippable_frame(const VP10_COMP *cpi) {
+static int is_skippable_frame(const AV1_COMP *cpi) {
// If the current frame does not have a non-zero motion vector detected in
// the first pass, and neither do its previous and forward frames, then this
// frame can be skipped for partition check, and the partition size is assigned
@@ -2690,8 +2689,8 @@
twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
}
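
The predicate is just an exact zero-motion test on consecutive stats entries; a minimal sketch (all_zero_motion is a hypothetical helper):

// pcnt_inter - pcnt_motion == 1.0 means every MB was inter-coded with a
// zero motion vector in the first pass; is_skippable_frame requires this
// for the previous, current, and next stats entries.
static int all_zero_motion(const FIRSTPASS_STATS *s) {
  return s->pcnt_inter - s->pcnt_motion == 1;
}
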
-void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_second_pass_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
@@ -2710,7 +2709,7 @@
int target_rate;
configure_buffer_updates(cpi);
target_rate = gf_group->bit_allocation[gf_group->index];
- target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+ target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
rc->base_frame_target = target_rate;
cm->frame_type = INTER_FRAME;
@@ -2724,9 +2723,9 @@
return;
}
- vpx_clear_system_state();
+ aom_clear_system_state();
- if (cpi->oxcf.rc_mode == VPX_Q) {
+ if (cpi->oxcf.rc_mode == AOM_Q) {
twopass->active_worst_quality = cpi->oxcf.cq_level;
} else if (cm->current_video_frame == 0) {
// Special case code for first frame.
@@ -2748,13 +2747,13 @@
twopass->baseline_active_worst_quality = tmp_q;
rc->ni_av_qi = tmp_q;
rc->last_q[INTER_FRAME] = tmp_q;
- rc->avg_q = vp10_convert_qindex_to_q(tmp_q, cm->bit_depth);
+ rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->bit_depth);
rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.best_allowed_q) / 2;
rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
}
- vp10_zero(this_frame);
+ av1_zero(this_frame);
if (EOF == input_stats(twopass, &this_frame)) return;
// Set the frame content type flag.
@@ -2805,9 +2804,9 @@
target_rate = gf_group->bit_allocation[gf_group->index];
if (cpi->common.frame_type == KEY_FRAME)
- target_rate = vp10_rc_clamp_iframe_target_size(cpi, target_rate);
+ target_rate = av1_rc_clamp_iframe_target_size(cpi, target_rate);
else
- target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+ target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
rc->base_frame_target = target_rate;
@@ -2828,7 +2827,7 @@
#define MINQ_ADJ_LIMIT 48
#define MINQ_ADJ_LIMIT_CQ 20
#define HIGH_UNDERSHOOT_RATIO 2
-void vp10_twopass_postencode_update(VP10_COMP *cpi) {
+void av1_twopass_postencode_update(AV1_COMP *cpi) {
TWO_PASS *const twopass = &cpi->twopass;
RATE_CONTROL *const rc = &cpi->rc;
const int bits_used = rc->base_frame_target;
@@ -2839,7 +2838,7 @@
// is designed to prevent extreme behaviour at the end of a clip
// or group of frames.
rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
- twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0);
+ twopass->bits_left = AOMMAX(twopass->bits_left - bits_used, 0);
// Calculate the pct rc error.
if (rc->total_actual_bits) {
@@ -2854,19 +2853,19 @@
twopass->kf_group_bits -= bits_used;
twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
}
- twopass->kf_group_bits = VPXMAX(twopass->kf_group_bits, 0);
+ twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);
// Increment the gf group index ready for the next frame.
++twopass->gf_group.index;
// If the rate control is drifting consider adjustment to min or maxq.
- if ((cpi->oxcf.rc_mode != VPX_Q) &&
+ if ((cpi->oxcf.rc_mode != AOM_Q) &&
(cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) &&
!cpi->rc.is_src_frame_alt_ref) {
const int maxq_adj_limit =
rc->worst_quality - twopass->active_worst_quality;
const int minq_adj_limit =
- (cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
+ (cpi->oxcf.rc_mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
// Undershoot.
if (rc->rate_error_estimate > cpi->oxcf.under_shoot_pct) {
@@ -2904,17 +2903,17 @@
rc->vbr_bits_off_target_fast +=
fast_extra_thresh - rc->projected_frame_size;
rc->vbr_bits_off_target_fast =
- VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+ AOMMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
// Fast adaptation of minQ if necessary to use up the extra bits.
if (rc->avg_frame_bandwidth) {
twopass->extend_minq_fast =
(int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
}
- twopass->extend_minq_fast = VPXMIN(
+ twopass->extend_minq_fast = AOMMIN(
twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
} else if (rc->vbr_bits_off_target_fast) {
- twopass->extend_minq_fast = VPXMIN(
+ twopass->extend_minq_fast = AOMMIN(
twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
} else {
twopass->extend_minq_fast = 0;
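
Before moving to the header, the fast minQ adaptation above in compact form (illustration only; extend_minq_fast_units is a hypothetical name):

#include <stdint.h>

// Converts accumulated surplus bits into extra minQ range: 8 qindex units
// per average frame's worth of surplus, clamped so extend_minq_fast never
// pushes the combined extension past the mode's adjustment limit.
static int extend_minq_fast_units(int64_t bits_off_target_fast,
                                  int avg_frame_bandwidth, int extend_minq,
                                  int minq_adj_limit) {
  int units = 0;
  if (avg_frame_bandwidth)
    units = (int)(bits_off_target_fast * 8 / avg_frame_bandwidth);
  const int cap = minq_adj_limit - extend_minq;
  return (units < cap) ? units : cap;
}
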
diff --git a/av1/encoder/firstpass.h b/av1/encoder/firstpass.h
index 5623540..2b161a1 100644
--- a/av1/encoder/firstpass.h
+++ b/av1/encoder/firstpass.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_FIRSTPASS_H_
-#define VP10_ENCODER_FIRSTPASS_H_
+#ifndef AV1_ENCODER_FIRSTPASS_H_
+#define AV1_ENCODER_FIRSTPASS_H_
#include "av1/encoder/lookahead.h"
#include "av1/encoder/ratectrl.h"
@@ -162,25 +162,24 @@
GF_GROUP gf_group;
} TWO_PASS;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_init_first_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_first_pass_params(struct VP10_COMP *cpi);
-void vp10_first_pass(struct VP10_COMP *cpi,
- const struct lookahead_entry *source);
-void vp10_end_first_pass(struct VP10_COMP *cpi);
+void av1_init_first_pass(struct AV1_COMP *cpi);
+void av1_rc_get_first_pass_params(struct AV1_COMP *cpi);
+void av1_first_pass(struct AV1_COMP *cpi, const struct lookahead_entry *source);
+void av1_end_first_pass(struct AV1_COMP *cpi);
-void vp10_init_second_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_second_pass_params(struct VP10_COMP *cpi);
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_init_second_pass(struct AV1_COMP *cpi);
+void av1_rc_get_second_pass_params(struct AV1_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
// Post encode update of the rate control parameters for 2-pass
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
-void vp10_init_subsampling(struct VP10_COMP *cpi);
+void av1_init_subsampling(struct AV1_COMP *cpi);
-void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
- int *scaled_frame_height);
+void av1_calculate_coded_size(struct AV1_COMP *cpi, int *scaled_frame_width,
+ int *scaled_frame_height);
#if CONFIG_EXT_REFS
static inline int get_number_of_extra_arfs(int interval, int arf_pending) {
@@ -199,4 +198,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_FIRSTPASS_H_
+#endif // AV1_ENCODER_FIRSTPASS_H_
diff --git a/av1/encoder/global_motion.h b/av1/encoder/global_motion.h
index ed088d6..aad8cc4 100644
--- a/av1/encoder/global_motion.h
+++ b/av1/encoder/global_motion.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_GLOBAL_MOTION_H_
-#define VP10_ENCODER_GLOBAL_MOTION_H_
+#ifndef AV1_ENCODER_GLOBAL_MOTION_H_
+#define AV1_ENCODER_GLOBAL_MOTION_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -25,4 +25,4 @@
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_GLOBAL_MOTION_H_
+#endif // AV1_ENCODER_GLOBAL_MOTION_H_
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
index ccfab0a..a0e37a3 100644
--- a/av1/encoder/hybrid_fwd_txfm.c
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "av1/common/idct.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
@@ -18,16 +18,16 @@
static INLINE void fdct32x32(int rd_transform, const int16_t *src,
tran_low_t *dst, int src_stride) {
if (rd_transform)
- vpx_fdct32x32_rd(src, dst, src_stride);
+ aom_fdct32x32_rd(src, dst, src_stride);
else
- vpx_fdct32x32(src, dst, src_stride);
+ aom_fdct32x32(src, dst, src_stride);
}
static void fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type, int lossless) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_fwht4x4(src_diff, coeff, diff_stride);
+ av1_fwht4x4(src_diff, coeff, diff_stride);
return;
}
@@ -35,7 +35,7 @@
case DCT_DCT:
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+ case ADST_ADST: av1_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
@@ -47,8 +47,8 @@
case V_ADST:
case H_ADST:
case V_FLIPADST:
- case H_FLIPADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
- case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
+ case H_FLIPADST: av1_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0);
}
@@ -59,42 +59,42 @@
int diff_stride, TX_TYPE tx_type,
FWD_TXFM_OPT fwd_txfm_opt) {
(void)fwd_txfm_opt;
- vp10_fht4x8(src_diff, coeff, diff_stride, tx_type);
+ av1_fht4x8(src_diff, coeff, diff_stride, tx_type);
}
static void fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type,
FWD_TXFM_OPT fwd_txfm_opt) {
(void)fwd_txfm_opt;
- vp10_fht8x4(src_diff, coeff, diff_stride, tx_type);
+ av1_fht8x4(src_diff, coeff, diff_stride, tx_type);
}
static void fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type,
FWD_TXFM_OPT fwd_txfm_opt) {
(void)fwd_txfm_opt;
- vp10_fht8x16(src_diff, coeff, diff_stride, tx_type);
+ av1_fht8x16(src_diff, coeff, diff_stride, tx_type);
}
static void fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type,
FWD_TXFM_OPT fwd_txfm_opt) {
(void)fwd_txfm_opt;
- vp10_fht16x8(src_diff, coeff, diff_stride, tx_type);
+ av1_fht16x8(src_diff, coeff, diff_stride, tx_type);
}
static void fwd_txfm_16x32(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type,
FWD_TXFM_OPT fwd_txfm_opt) {
(void)fwd_txfm_opt;
- vp10_fht16x32(src_diff, coeff, diff_stride, tx_type);
+ av1_fht16x32(src_diff, coeff, diff_stride, tx_type);
}
static void fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type,
FWD_TXFM_OPT fwd_txfm_opt) {
(void)fwd_txfm_opt;
- vp10_fht32x16(src_diff, coeff, diff_stride, tx_type);
+ av1_fht32x16(src_diff, coeff, diff_stride, tx_type);
}
#endif // CONFIG_EXT_TX
@@ -107,9 +107,9 @@
case DCT_ADST:
case ADST_ADST:
if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
- vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
+ av1_fht8x8(src_diff, coeff, diff_stride, tx_type);
else // FWD_TXFM_OPT_DC
- vpx_fdct8x8_1(src_diff, coeff, diff_stride);
+ aom_fdct8x8_1(src_diff, coeff, diff_stride);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -122,8 +122,8 @@
case V_ADST:
case H_ADST:
case V_FLIPADST:
- case H_FLIPADST: vp10_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
- case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
+ case H_FLIPADST: av1_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0);
}
@@ -138,9 +138,9 @@
case DCT_ADST:
case ADST_ADST:
if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
- vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
+ av1_fht16x16(src_diff, coeff, diff_stride, tx_type);
else // FWD_TXFM_OPT_DC
- vpx_fdct16x16_1(src_diff, coeff, diff_stride);
+ aom_fdct16x16_1(src_diff, coeff, diff_stride);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -153,12 +153,8 @@
case V_ADST:
case H_ADST:
case V_FLIPADST:
- case H_FLIPADST:
- vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
- break;
- case IDTX:
- vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type);
- break;
+ case H_FLIPADST: av1_fht16x16(src_diff, coeff, diff_stride, tx_type); break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0);
}
@@ -172,7 +168,7 @@
if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
fdct32x32(rd_transform, src_diff, coeff, diff_stride);
else // FWD_TXFM_OPT_DC
- vpx_fdct32x32_1(src_diff, coeff, diff_stride);
+ aom_fdct32x32_1(src_diff, coeff, diff_stride);
break;
#if CONFIG_EXT_TX
case ADST_DCT:
@@ -183,7 +179,7 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+ av1_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
break;
case V_DCT:
case H_DCT:
@@ -191,23 +187,21 @@
case H_ADST:
case V_FLIPADST:
case H_FLIPADST:
- vp10_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+ av1_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
break;
- case IDTX:
- vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type);
- break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0); break;
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type, int lossless,
const int bd) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+ av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
return;
}
@@ -216,7 +210,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -224,7 +218,7 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
break;
case V_DCT:
case H_DCT:
@@ -232,9 +226,9 @@
case H_ADST:
case V_FLIPADST:
case H_FLIPADST:
- vp10_highbd_fht4x4_c(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht4x4_c(src_diff, coeff, diff_stride, tx_type);
break;
- case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0);
}
@@ -246,7 +240,7 @@
FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
(void)fwd_txfm_opt;
(void)bd;
- vp10_highbd_fht4x8(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht4x8(src_diff, coeff, diff_stride, tx_type);
}
static void highbd_fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
@@ -254,7 +248,7 @@
FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
(void)fwd_txfm_opt;
(void)bd;
- vp10_highbd_fht8x4(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht8x4(src_diff, coeff, diff_stride, tx_type);
}
static void highbd_fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
@@ -262,7 +256,7 @@
FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
(void)fwd_txfm_opt;
(void)bd;
- vp10_highbd_fht8x16(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht8x16(src_diff, coeff, diff_stride, tx_type);
}
static void highbd_fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
@@ -270,7 +264,7 @@
FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
(void)fwd_txfm_opt;
(void)bd;
- vp10_highbd_fht16x8(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht16x8(src_diff, coeff, diff_stride, tx_type);
}
static void highbd_fwd_txfm_16x32(const int16_t *src_diff, tran_low_t *coeff,
@@ -278,7 +272,7 @@
FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
(void)fwd_txfm_opt;
(void)bd;
- vp10_highbd_fht16x32(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht16x32(src_diff, coeff, diff_stride, tx_type);
}
static void highbd_fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
@@ -286,7 +280,7 @@
FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
(void)fwd_txfm_opt;
(void)bd;
- vp10_highbd_fht32x16(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht32x16(src_diff, coeff, diff_stride, tx_type);
}
#endif // CONFIG_EXT_TX
@@ -299,7 +293,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -307,7 +301,7 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
break;
case V_DCT:
case H_DCT:
@@ -316,9 +310,9 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST exists only in C
- vp10_highbd_fht8x8_c(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht8x8_c(src_diff, coeff, diff_stride, tx_type);
break;
- case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0);
}
@@ -333,7 +327,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -341,7 +335,7 @@
case FLIPADST_FLIPADST:
case ADST_FLIPADST:
case FLIPADST_ADST:
- vp10_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
break;
case V_DCT:
case H_DCT:
@@ -350,11 +344,9 @@
case V_FLIPADST:
case H_FLIPADST:
// Use C version since DST exists only in C
- vp10_highbd_fht16x16_c(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht16x16_c(src_diff, coeff, diff_stride, tx_type);
break;
- case IDTX:
- vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type);
- break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0);
}
@@ -368,7 +360,7 @@
(void)fwd_txfm_opt;
switch (tx_type) {
case DCT_DCT:
- vp10_fwd_txfm2d_32x32(src_diff, coeff, diff_stride, tx_type, bd);
+ av1_fwd_txfm2d_32x32(src_diff, coeff, diff_stride, tx_type, bd);
break;
#if CONFIG_EXT_TX
case ADST_DCT:
@@ -385,16 +377,14 @@
case H_ADST:
case V_FLIPADST:
case H_FLIPADST:
- vp10_highbd_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
break;
- case IDTX:
- vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type);
- break;
+ case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type); break;
#endif // CONFIG_EXT_TX
default: assert(0); break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
void fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
FWD_TXFM_PARAM *fwd_txfm_param) {
@@ -441,7 +431,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, FWD_TXFM_PARAM *fwd_txfm_param) {
const int fwd_txfm_opt = fwd_txfm_param->fwd_txfm_opt;
@@ -495,4 +485,4 @@
default: assert(0); break;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/hybrid_fwd_txfm.h b/av1/encoder/hybrid_fwd_txfm.h
index 07b832c..3ab4fd1 100644
--- a/av1/encoder/hybrid_fwd_txfm.h
+++ b/av1/encoder/hybrid_fwd_txfm.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_HYBRID_FWD_TXFM_H_
-#define VP10_ENCODER_HYBRID_FWD_TXFM_H_
+#ifndef AV1_ENCODER_HYBRID_FWD_TXFM_H_
+#define AV1_ENCODER_HYBRID_FWD_TXFM_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
typedef enum FWD_TXFM_OPT { FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC } FWD_TXFM_OPT;
@@ -21,9 +21,9 @@
FWD_TXFM_OPT fwd_txfm_opt;
int rd_transform;
int lossless;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int bd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} FWD_TXFM_PARAM;
#ifdef __cplusplus
@@ -33,13 +33,13 @@
void fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
FWD_TXFM_PARAM *fwd_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, FWD_TXFM_PARAM *fwd_txfm_param);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_HYBRID_FWD_TXFM_H_
+#endif // AV1_ENCODER_HYBRID_FWD_TXFM_H_
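
As a usage note for the renamed header above: callers fill an FWD_TXFM_PARAM and dispatch through fwd_txfm(). A minimal sketch under the assumption that the struct also carries tx_type and tx_size fields (they sit above the hunk shown here), with arbitrary example values:

// Hypothetical caller: forward-transform one 4x4 block of residuals with a
// plain DCT_DCT, no lossless path and no RD-optimized 32x32 variant.
static void example_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                                 int diff_stride) {
  FWD_TXFM_PARAM param;
  param.tx_type = DCT_DCT;   // assumed field, not shown in this hunk
  param.tx_size = TX_4X4;    // assumed field, not shown in this hunk
  param.fwd_txfm_opt = FWD_TXFM_OPT_NORMAL;
  param.rd_transform = 0;
  param.lossless = 0;
  fwd_txfm(src_diff, coeff, diff_stride, &param);
}
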
diff --git a/av1/encoder/lookahead.c b/av1/encoder/lookahead.c
index 3c4ff7d..094fb62 100644
--- a/av1/encoder/lookahead.c
+++ b/av1/encoder/lookahead.c
@@ -10,7 +10,7 @@
#include <assert.h>
#include <stdlib.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/common.h"
@@ -29,26 +29,26 @@
return buf;
}
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
+void av1_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx) {
if (ctx->buf) {
int i;
- for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img);
+ for (i = 0; i < ctx->max_sz; i++) aom_free_frame_buffer(&ctx->buf[i].img);
free(ctx->buf);
}
free(ctx);
}
}
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
- unsigned int height,
- unsigned int subsampling_x,
- unsigned int subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_highbitdepth,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_highbitdepth,
#endif
- unsigned int depth) {
+ unsigned int depth) {
struct lookahead_ctx *ctx = NULL;
// Clamp the lookahead queue depth
@@ -66,28 +66,28 @@
ctx->buf = calloc(depth, sizeof(*ctx->buf));
if (!ctx->buf) goto bail;
for (i = 0; i < depth; i++)
- if (vpx_alloc_frame_buffer(
+ if (aom_alloc_frame_buffer(
&ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
+ AOM_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
goto bail;
}
return ctx;
bail:
- vp10_lookahead_destroy(ctx);
+ av1_lookahead_destroy(ctx);
return NULL;
}
#define USE_PARTIAL_COPY 0
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
- int64_t ts_start, int64_t ts_end,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_highbitdepth,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_highbitdepth,
#endif
- unsigned int flags) {
+ unsigned int flags) {
struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
int row, col, active_end;
@@ -117,7 +117,7 @@
#if USE_PARTIAL_COPY
// TODO(jkoleszar): This is disabled for now, as
- // vp10_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+ // av1_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
// Only do this partial copy if the following conditions are all met:
// 1. Lookahead queue has a size of 1.
@@ -144,8 +144,8 @@
}
// Only copy this active region.
- vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
- 16, (active_end - col) << 4);
+ av1_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+ 16, (active_end - col) << 4);
// Start again from the end of this active region.
col = active_end;
@@ -158,14 +158,14 @@
if (larger_dimensions) {
YV12_BUFFER_CONFIG new_img;
memset(&new_img, 0, sizeof(new_img));
- if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x,
+ if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x,
subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, 0))
+ AOM_ENC_BORDER_IN_PIXELS, 0))
return 1;
- vpx_free_frame_buffer(&buf->img);
+ aom_free_frame_buffer(&buf->img);
buf->img = new_img;
} else if (new_dimensions) {
buf->img.y_crop_width = src->y_crop_width;
@@ -176,7 +176,7 @@
buf->img.subsampling_y = src->subsampling_y;
}
// Partial copy not implemented yet
- vp10_copy_and_extend_frame(src, &buf->img);
+ av1_copy_and_extend_frame(src, &buf->img);
#if USE_PARTIAL_COPY
}
#endif
@@ -187,8 +187,8 @@
return 0;
}
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
- int drain) {
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx,
+ int drain) {
struct lookahead_entry *buf = NULL;
if (ctx && ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
@@ -198,8 +198,8 @@
return buf;
}
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
- int index) {
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
+ int index) {
struct lookahead_entry *buf = NULL;
if (index >= 0) {
@@ -221,4 +221,4 @@
return buf;
}
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/av1/encoder/lookahead.h b/av1/encoder/lookahead.h
index 4b26068..35b1e0c 100644
--- a/av1/encoder/lookahead.h
+++ b/av1/encoder/lookahead.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_LOOKAHEAD_H_
-#define VP10_ENCODER_LOOKAHEAD_H_
+#ifndef AV1_ENCODER_LOOKAHEAD_H_
+#define AV1_ENCODER_LOOKAHEAD_H_
#include "aom_scale/yv12config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -43,18 +43,18 @@
* The lookahead stage is a queue of frame buffers on which some analysis
* may be done when buffers are enqueued.
*/
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
- unsigned int height,
- unsigned int subsampling_x,
- unsigned int subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_highbitdepth,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_highbitdepth,
#endif
- unsigned int depth);
+ unsigned int depth);
/**\brief Destroys the lookahead stage
*/
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
+void av1_lookahead_destroy(struct lookahead_ctx *ctx);
/**\brief Enqueue a source buffer
*
@@ -71,12 +71,12 @@
* \param[in] flags Flags set on this frame
* \param[in] active_map Map that specifies which macroblock is active
*/
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
- int64_t ts_start, int64_t ts_end,
-#if CONFIG_VP9_HIGHBITDEPTH
- int use_highbitdepth,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end,
+#if CONFIG_AOM_HIGHBITDEPTH
+ int use_highbitdepth,
#endif
- unsigned int flags);
+ unsigned int flags);
/**\brief Get the next source buffer to encode
*
@@ -88,8 +88,7 @@
* \retval NULL, if drain set and queue is empty
* \retval NULL, if drain not set and queue not of the configured depth
*/
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
- int drain);
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx, int drain);
/**\brief Get a future source buffer to encode
*
@@ -98,17 +97,17 @@
*
* \retval NULL, if no buffer exists at the specified index
*/
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
- int index);
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
+ int index);
/**\brief Get the number of frames currently in the lookahead queue
*
* \param[in] ctx Pointer to the lookahead context
*/
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx);
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_LOOKAHEAD_H_
+#endif // AV1_ENCODER_LOOKAHEAD_H_
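
The lookahead API above follows an init/push/pop/destroy lifecycle; a minimal sketch for a non-highbitdepth build (the frame size, 4:2:0 subsampling, and depth of 25 are arbitrary example values; lookahead_roundtrip is a hypothetical name):

// Hypothetical driver: queue one source frame, then drain it for encoding.
static int lookahead_roundtrip(YV12_BUFFER_CONFIG *src, int64_t ts_start,
                               int64_t ts_end) {
  struct lookahead_ctx *ctx =
      av1_lookahead_init(1920, 1080, /*subsampling_x=*/1,
                         /*subsampling_y=*/1, /*depth=*/25);
  if (!ctx) return -1;
  if (av1_lookahead_push(ctx, src, ts_start, ts_end, /*flags=*/0)) {
    av1_lookahead_destroy(ctx);
    return -1;
  }
  // drain=1 returns the oldest frame even if the queue is not yet full.
  struct lookahead_entry *entry = av1_lookahead_pop(ctx, /*drain=*/1);
  const int ok = (entry != NULL) ? 0 : -1;  // encode from entry->img here
  av1_lookahead_destroy(ctx);
  return ok;
}
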
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
index 43f0f87..f973e87 100644
--- a/av1/encoder/mbgraph.c
+++ b/av1/encoder/mbgraph.c
@@ -10,11 +10,11 @@
#include <limits.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/system_state.h"
#include "av1/encoder/segmentation.h"
#include "av1/encoder/mcomp.h"
@@ -22,12 +22,12 @@
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
-static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+static unsigned int do_16x16_motion_iteration(AV1_COMP *cpi, const MV *ref_mv,
int mb_row, int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
- const vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+ const aom_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
const int tmp_col_min = x->mv_col_min;
const int tmp_col_max = x->mv_col_max;
@@ -38,16 +38,16 @@
// Further step/diamond searches as necessary
int step_param = mv_sf->reduce_first_step_size;
- step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
+ step_param = AOMMIN(step_param, MAX_MVSEARCH_STEPS - 2);
- vp10_set_mv_search_range(x, ref_mv);
+ av1_set_mv_search_range(x, ref_mv);
ref_full.col = ref_mv->col >> 3;
ref_full.row = ref_mv->row >> 3;
/*cpi->sf.search_method == HEX*/
- vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
- cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv);
+ av1_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
+ cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv);
// Try sub-pixel MC
// if (bestsme > error_thresh && bestsme < INT_MAX)
@@ -73,7 +73,7 @@
xd->mi[0]->mbmi.ref_frame[1] = NONE;
#endif // CONFIG_EXT_INTER
- vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
+ av1_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
/* restore UMV window */
x->mv_col_min = tmp_col_min;
@@ -81,11 +81,11 @@
x->mv_row_min = tmp_row_min;
x->mv_row_max = tmp_row_max;
- return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ return aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
}
-static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv, int mb_row,
+static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv, int mb_row,
int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -94,7 +94,7 @@
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
- err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
best_mv.col = best_mv.row = 0;
@@ -123,21 +123,21 @@
return err;
}
-static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
+static int do_16x16_zerozero_search(AV1_COMP *cpi, int_mv *dst_mv) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
unsigned int err;
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
- err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
dst_mv->as_int = 0;
return err;
}
-static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
+static int find_best_16x16_intra(AV1_COMP *cpi, PREDICTION_MODE *pbest_mode) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
PREDICTION_MODE best_mode = -1, mode;
@@ -149,10 +149,10 @@
unsigned int err;
xd->mi[0]->mbmi.mode = mode;
- vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
- x->plane[0].src.stride, xd->plane[0].dst.buf,
- xd->plane[0].dst.stride, 0, 0, 0);
- err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ av1_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+ x->plane[0].src.stride, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, 0, 0, 0);
+ err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
// find best
@@ -167,7 +167,7 @@
return best_err;
}
-static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+static void update_mbgraph_mb_stats(AV1_COMP *cpi, MBGRAPH_MB_STATS *stats,
YV12_BUFFER_CONFIG *buf, int mb_y_offset,
YV12_BUFFER_CONFIG *golden_ref,
const MV *prev_golden_ref_mv,
@@ -176,7 +176,7 @@
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
// FIXME in practice we're completely ignoring chroma here
x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
@@ -220,21 +220,21 @@
}
}
-static void update_mbgraph_frame_stats(VP10_COMP *cpi,
+static void update_mbgraph_frame_stats(AV1_COMP *cpi,
MBGRAPH_FRAME_STATS *stats,
YV12_BUFFER_CONFIG *buf,
YV12_BUFFER_CONFIG *golden_ref,
YV12_BUFFER_CONFIG *alt_ref) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int mb_col, mb_row, offset = 0;
int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
MV gld_top_mv = { 0, 0 };
MODE_INFO mi_local;
- vp10_zero(mi_local);
+ av1_zero(mi_local);
// Set up limit values for motion vectors to prevent them extending outside
// the UMV borders.
x->mv_row_min = -BORDER_MV_PIXELS_B16;
@@ -287,8 +287,8 @@
}
// void separate_arf_mbs_byzz
-static void separate_arf_mbs(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void separate_arf_mbs(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int mb_col, mb_row, offset, i;
int mi_row, mi_col;
int ncnt[4] = { 0 };
@@ -298,7 +298,7 @@
CHECK_MEM_ERROR(
cm, arf_not_zz,
- vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
+ aom_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
// We are not interested in results beyond the alt ref itself.
if (n_frames > cpi->rc.frames_till_gf_update_due)
@@ -354,19 +354,19 @@
else
cpi->static_mb_pct = 0;
- vp10_enable_segmentation(&cm->seg);
+ av1_enable_segmentation(&cm->seg);
} else {
cpi->static_mb_pct = 0;
- vp10_disable_segmentation(&cm->seg);
+ av1_disable_segmentation(&cm->seg);
}
// Free locally allocated storage
- vpx_free(arf_not_zz);
+ aom_free(arf_not_zz);
}
-void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
- int i, n_frames = vp10_lookahead_depth(cpi->lookahead);
+void av1_update_mbgraph_stats(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
+ int i, n_frames = av1_lookahead_depth(cpi->lookahead);
YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
assert(golden_ref != NULL);
@@ -390,7 +390,7 @@
// the ARF MC search backwards, to get optimal results for MV caching
for (i = 0; i < n_frames; i++) {
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
- struct lookahead_entry *q_cur = vp10_lookahead_peek(cpi->lookahead, i);
+ struct lookahead_entry *q_cur = av1_lookahead_peek(cpi->lookahead, i);
assert(q_cur != NULL);
@@ -398,7 +398,7 @@
cpi->Source);
}
- vpx_clear_system_state();
+ aom_clear_system_state();
separate_arf_mbs(cpi);
}
diff --git a/av1/encoder/mbgraph.h b/av1/encoder/mbgraph.h
index 0b056af..a01e5de 100644
--- a/av1/encoder/mbgraph.h
+++ b/av1/encoder/mbgraph.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_MBGRAPH_H_
-#define VP10_ENCODER_MBGRAPH_H_
+#ifndef AV1_ENCODER_MBGRAPH_H_
+#define AV1_ENCODER_MBGRAPH_H_
#ifdef __cplusplus
extern "C" {
@@ -27,12 +27,12 @@
typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_update_mbgraph_stats(struct VP10_COMP *cpi);
+void av1_update_mbgraph_stats(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_MBGRAPH_H_
+#endif // AV1_ENCODER_MBGRAPH_H_
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index 9ee06e9..303c5d5 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -12,11 +12,11 @@
#include <math.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/common.h"
@@ -33,16 +33,16 @@
return &buf->buf[mv->row * buf->stride + mv->col];
}
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;
- col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1);
- row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1);
- col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1);
- row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1);
+ col_min = AOMMAX(col_min, (MV_LOW >> 3) + 1);
+ row_min = AOMMAX(row_min, (MV_LOW >> 3) + 1);
+ col_max = AOMMIN(col_max, (MV_UPP >> 3) - 1);
+ row_max = AOMMIN(row_max, (MV_UPP >> 3) - 1);
// Get intersection of UMV window and valid MV window to reduce # of checks
// in diamond search.
@@ -52,25 +52,25 @@
if (x->mv_row_max > row_max) x->mv_row_max = row_max;
}
-int vp10_init_search_range(int size) {
+int av1_init_search_range(int size) {
int sr = 0;
// Minimum search size no matter what the passed in value.
- size = VPXMAX(16, size);
+ size = AOMMAX(16, size);
while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
- sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
+ sr = AOMMIN(sr, MAX_MVSEARCH_STEPS - 2);
return sr;
}
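
A worked example of the search-range sizing just above, with the MAX_FULL_PEL_VAL and MAX_MVSEARCH_STEPS constants (defined outside this hunk) assumed to be 1023 and 11:

// For size = 64: 64<<4 = 1024 >= 1023 stops the loop at sr = 4, well under
// the MAX_MVSEARCH_STEPS - 2 = 9 cap; smaller dimensions search wider.
static int init_search_range_sketch(int size) {
  int sr = 0;
  if (size < 16) size = 16;          // AOMMAX(16, size)
  while ((size << sr) < 1023) ++sr;  // MAX_FULL_PEL_VAL assumed 1023
  return (sr < 9) ? sr : 9;          // MAX_MVSEARCH_STEPS - 2 assumed 9
}
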
static INLINE int mv_cost(const MV *mv, const int *joint_cost,
int *const comp_cost[2]) {
- return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+ return joint_cost[av1_get_mv_joint(mv)] + comp_cost[0][mv->row] +
comp_cost[1][mv->col];
}
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
- int *mvcost[2], int weight) {
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int weight) {
const MV diff = { mv->row - ref->row, mv->col - ref->col };
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
}
@@ -84,7 +84,7 @@
// accuracy in either bit cost or error cost will cause it to overflow.
return ROUND_POWER_OF_TWO(
(unsigned)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
- RDDIV_BITS + VP10_PROB_COST_SHIFT - RD_EPB_SHIFT +
+ RDDIV_BITS + AV1_PROB_COST_SHIFT - RD_EPB_SHIFT +
PIXEL_TRANSFORM_ERROR_SCALE);
}
return 0;
@@ -95,10 +95,10 @@
const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };
return ROUND_POWER_OF_TWO(
(unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) * sad_per_bit,
- VP10_PROB_COST_SHIFT);
+ AV1_PROB_COST_SHIFT);
}
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride) {
int len, ss_count = 1;
cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -119,7 +119,7 @@
cfg->searches_per_step = 4;
}
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
+void av1_init3smotion_compensation(search_site_config *cfg, int stride) {
int len, ss_count = 1;
cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -297,10 +297,10 @@
int br = bestmv->row * 8; \
int bc = bestmv->col * 8; \
int hstep = 4; \
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
int tr = br; \
int tc = bc; \
\
@@ -309,22 +309,22 @@
static unsigned int setup_center_error(
const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
- int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+ int error_per_bit, const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride, const uint8_t *const y,
int y_stride, const uint8_t *second_pred, int w, int h, int offset,
int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (second_pred != NULL) {
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, comp_pred16[MAX_SB_SQUARE]);
- vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
+ aom_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
y_stride);
besterr =
vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
} else {
DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
- vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
+ aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
}
} else {
@@ -336,14 +336,14 @@
(void)xd;
if (second_pred != NULL) {
DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
- vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
+ aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
} else {
besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
}
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return besterr;
}
@@ -371,9 +371,9 @@
(cost_list[4] - 2 * cost_list[0] + cost_list[2]));
}
-int vp10_find_best_sub_pixel_tree_pruned_evenmore(
+int av1_find_best_sub_pixel_tree_pruned_evenmore(
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+ const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, const uint8_t *second_pred, int w, int h,
int use_upsampled_ref) {
@@ -423,7 +423,7 @@
tr = br;
tc = bc;
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
hstep >>= 1;
FIRST_LEVEL_CHECKS;
if (eighthiters > 1) {
@@ -441,9 +441,9 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree_pruned_more(
+int av1_find_best_sub_pixel_tree_pruned_more(
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+ const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, const uint8_t *second_pred, int w, int h,
int use_upsampled_ref) {
@@ -483,7 +483,7 @@
}
}
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
tr = br;
tc = bc;
hstep >>= 1;
@@ -507,9 +507,9 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree_pruned(
+int av1_find_best_sub_pixel_tree_pruned(
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+ const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, const uint8_t *second_pred, int w, int h,
int use_upsampled_ref) {
@@ -571,7 +571,7 @@
tc = bc;
}
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
hstep >>= 1;
FIRST_LEVEL_CHECKS;
if (eighthiters > 1) {
@@ -605,20 +605,20 @@
/* clang-format on */
static int upsampled_pref_error(const MACROBLOCKD *xd,
- const vpx_variance_fn_ptr_t *vfp,
+ const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride,
const uint8_t *const y, int y_stride,
const uint8_t *second_pred, int w, int h,
unsigned int *sse) {
unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
if (second_pred != NULL)
- vpx_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
+ aom_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
y_stride);
else
- vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+ aom_highbd_upsampled_pred(pred16, w, h, y, y_stride);
besterr = vfp->vf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride, sse);
} else {
@@ -626,14 +626,14 @@
#else
DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
(void)xd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (second_pred != NULL)
- vpx_comp_avg_upsampled_pred(pred, second_pred, w, h, y, y_stride);
+ aom_comp_avg_upsampled_pred(pred, second_pred, w, h, y, y_stride);
else
- vpx_upsampled_pred(pred, w, h, y, y_stride);
+ aom_upsampled_pred(pred, w, h, y, y_stride);
besterr = vfp->vf(pred, w, src, src_stride, sse);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
return besterr;
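
A note on the upsampled path above: aom_upsampled_pred is expected to read from a reference plane that has already been upsampled 8x, so producing the prediction block reduces to a strided copy. A sketch under that assumption (not the library source):

    static void upsampled_pred_sketch(uint8_t *comp_pred, int width, int height,
                                      const uint8_t *ref, int ref_stride) {
      int i, j, k;
      for (i = 0; i < height; i++) {
        /* Take every 8th sample from the 8x-upsampled reference row. */
        for (j = 0, k = 0; j < width; j++, k += 8) comp_pred[j] = ref[k];
        comp_pred += width;
        ref += ref_stride;
      }
    }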
@@ -641,7 +641,7 @@
static unsigned int upsampled_setup_center_error(
const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
- int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+ int error_per_bit, const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride, const uint8_t *const y,
int y_stride, const uint8_t *second_pred, int w, int h, int offset,
int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
@@ -652,14 +652,14 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree(MACROBLOCK *x, const MV *ref_mv, int allow_hp,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
- int forced_stop, int iters_per_step,
- int *cost_list, int *mvjcost, int *mvcost[2],
- int *distortion, unsigned int *sse1,
- const uint8_t *second_pred, int w, int h,
- int use_upsampled_ref) {
+int av1_find_best_sub_pixel_tree(MACROBLOCK *x, const MV *ref_mv, int allow_hp,
+ int error_per_bit,
+ const aom_variance_fn_ptr_t *vfp,
+ int forced_stop, int iters_per_step,
+ int *cost_list, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1,
+ const uint8_t *second_pred, int w, int h,
+ int use_upsampled_ref) {
const uint8_t *const src_address = x->plane[0].src.buf;
const int src_stride = x->plane[0].src.stride;
const MACROBLOCKD *xd = &x->e_mbd;
@@ -675,10 +675,10 @@
int bc = bestmv->col * 8;
int hstep = 4;
int iter, round = 3 - forced_stop;
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
int tr = br;
int tc = bc;
const MV *search_step = search_step_table;
@@ -686,7 +686,7 @@
unsigned int cost_array[5];
int kr, kc;
- if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+ if (!(allow_hp && av1_use_mv_hp(ref_mv)))
if (round == 3) round = 2;
bestmv->row *= 8;
@@ -852,7 +852,7 @@
// Calculate and return a sad+mvcost list around an integer best pel.
static INLINE void calc_int_cost_list(const MACROBLOCK *x,
const MV *const ref_mv, int sadpb,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *best_mv, int *cost_list) {
static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
const struct buf_2d *const what = &x->plane[0].src;
@@ -896,7 +896,7 @@
static INLINE void calc_int_sad_list(const MACROBLOCK *x,
const MV *const ref_mv, int sadpb,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *best_mv, int *cost_list,
const int use_mvcost, const int bestsad) {
static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
@@ -946,7 +946,7 @@
//
static int pattern_search(
MACROBLOCK *x, MV *start_mv, int search_param, int sad_per_bit,
- int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+ int do_init_search, int *cost_list, const aom_variance_fn_ptr_t *vfp,
int use_mvcost, const MV *center_mv,
const int num_candidates[MAX_PATTERN_SCALES],
const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
@@ -1198,9 +1198,9 @@
return bestsad;
}
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
- const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost) {
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
+ int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
@@ -1214,9 +1214,9 @@
: 0);
}
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
- const MV *center_mv, const uint8_t *second_pred,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost) {
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const uint8_t *second_pred,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
@@ -1230,10 +1230,10 @@
: 0);
}
-int vp10_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
- int sad_per_bit, int do_init_search, int *cost_list,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
- const MV *center_mv) {
+int av1_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv) {
// First scale has 8-closest points, the rest have 6 points in hex shape
// at increasing scales
static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
@@ -1268,7 +1268,7 @@
static int bigdia_search(MACROBLOCK *x, MV *start_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv) {
// First scale has 4-closest points, the rest have 8 points in diamond
// shape at increasing scales
@@ -1309,7 +1309,7 @@
static int square_search(MACROBLOCK *x, MV *start_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv) {
// All scales have 8 closest points in square shape
static const int square_num_candidates[MAX_PATTERN_SCALES] = {
@@ -1351,18 +1351,18 @@
static int fast_hex_search(MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit,
int do_init_search, // must be zero for fast_hex
- int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+ int *cost_list, const aom_variance_fn_ptr_t *vfp,
int use_mvcost, const MV *center_mv) {
- return vp10_hex_search(
- x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost, center_mv);
+ return av1_hex_search(x, ref_mv, AOMMAX(MAX_MVSEARCH_STEPS - 2, search_param),
+ sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
+ center_mv);
}
static int fast_dia_search(MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv) {
- return bigdia_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
+ return bigdia_search(x, ref_mv, AOMMAX(MAX_MVSEARCH_STEPS - 2, search_param),
sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
center_mv);
}
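
av1_hex_search, bigdia_search, and square_search are all thin wrappers that hand a candidate table to pattern_search. A simplified sketch of the shared idea, with a hypothetical sad_at callback standing in for the SAD-plus-mvcost evaluation (the real search also re-visits a scale until no candidate improves):

    static void pattern_search_sketch(
        MV *best_mv, int num_scales, const int num_candidates[],
        const MV candidates[][MAX_PATTERN_CANDIDATES],
        unsigned int (*sad_at)(const MV *)) {
      unsigned int best_sad = sad_at(best_mv);
      int scale;
      /* Walk the pattern from the widest scale down to 1-pel spacing. */
      for (scale = num_scales - 1; scale >= 0; --scale) {
        int i, best_site = -1;
        for (i = 0; i < num_candidates[scale]; ++i) {
          const MV mv = { best_mv->row + candidates[scale][i].row,
                          best_mv->col + candidates[scale][i].col };
          const unsigned int sad = sad_at(&mv);
          if (sad < best_sad) {
            best_sad = sad;
            best_site = i;
          }
        }
        if (best_site >= 0) {
          best_mv->row += candidates[scale][best_site].row;
          best_mv->col += candidates[scale][best_site].col;
        }
      }
    }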
@@ -1373,7 +1373,7 @@
// step size.
static int exhuastive_mesh_search(MACROBLOCK *x, MV *ref_mv, MV *best_mv,
int range, int step, int sad_per_bit,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
@@ -1393,10 +1393,10 @@
fn_ptr->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
- start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row);
- start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col);
- end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row);
- end_col = VPXMIN(range, x->mv_col_max - fcenter_mv.col);
+ start_row = AOMMAX(-range, x->mv_row_min - fcenter_mv.row);
+ start_col = AOMMAX(-range, x->mv_col_min - fcenter_mv.col);
+ end_row = AOMMIN(range, x->mv_row_max - fcenter_mv.row);
+ end_col = AOMMIN(range, x->mv_col_max - fcenter_mv.col);
for (r = start_row; r <= end_row; r += step) {
for (c = start_col; c <= end_col; c += col_step) {
@@ -1460,11 +1460,11 @@
return best_sad;
}
-int vp10_diamond_search_sad_c(MACROBLOCK *x, const search_site_config *cfg,
- MV *ref_mv, MV *best_mv, int search_param,
- int sad_per_bit, int *num00,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv) {
+int av1_diamond_search_sad_c(MACROBLOCK *x, const search_site_config *cfg,
+ MV *ref_mv, MV *best_mv, int search_param,
+ int sad_per_bit, int *num00,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv) {
int i, j, step;
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1609,7 +1609,7 @@
int center, offset = 0;
int bw = 4 << bwl; // redundant variable, to be changed in the experiments.
for (d = 0; d <= bw; d += 16) {
- this_sad = vpx_vector_var(&ref[d], src, bwl);
+ this_sad = aom_vector_var(&ref[d], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
offset = d;
@@ -1621,7 +1621,7 @@
int this_pos = offset + d;
// check limit
if (this_pos < 0 || this_pos > bw) continue;
- this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+ this_sad = aom_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
center = this_pos;
@@ -1633,7 +1633,7 @@
int this_pos = offset + d;
// check limit
if (this_pos < 0 || this_pos > bw) continue;
- this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+ this_sad = aom_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
center = this_pos;
@@ -1645,7 +1645,7 @@
int this_pos = offset + d;
// check limit
if (this_pos < 0 || this_pos > bw) continue;
- this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+ this_sad = aom_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
center = this_pos;
@@ -1657,7 +1657,7 @@
int this_pos = offset + d;
// check limit
if (this_pos < 0 || this_pos > bw) continue;
- this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+ this_sad = aom_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
center = this_pos;
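
The four nearly identical blocks above implement a coarse-to-fine 1-D search: after the step-16 scan, each pass probes offset +/- step for step = 8, 4, 2, 1 and re-centers on the winner. Condensed into one loop (same logic, illustrative helper):

    static int refine_1d_sketch(const int16_t *ref, const int16_t *src, int bw,
                                int bwl, int offset, int best_sad) {
      int step;
      for (step = 8; step >= 1; step >>= 1) {
        int center = offset;
        int d;
        for (d = -step; d <= step; d += 2 * step) {
          const int this_pos = offset + d;
          int this_sad;
          if (this_pos < 0 || this_pos > bw) continue;
          this_sad = aom_vector_var(&ref[this_pos], src, bwl);
          if (this_sad < best_sad) {
            best_sad = this_sad;
            center = this_pos;
          }
        }
        offset = center; /* re-center before halving the step */
      }
      return offset;
    }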
@@ -1671,9 +1671,9 @@
{ -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
};
-unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, int mi_row,
- int mi_col) {
+unsigned int av1_int_pro_motion_estimation(const AV1_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int mi_row,
+ int mi_col) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
@@ -1694,7 +1694,7 @@
MV this_mv;
const int norm_factor = 3 + (bw >> 5);
const YV12_BUFFER_CONFIG *scaled_ref_frame =
- vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+ av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
if (scaled_ref_frame) {
int i;
@@ -1702,10 +1702,10 @@
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
- vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+ av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
{
unsigned int this_sad;
tmp_mv->row = 0;
@@ -1724,25 +1724,25 @@
// Set up prediction 1-D reference set
ref_buf = xd->plane[0].pre[0].buf - (bw >> 1);
for (idx = 0; idx < search_width; idx += 16) {
- vpx_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh);
+ aom_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh);
ref_buf += 16;
}
ref_buf = xd->plane[0].pre[0].buf - (bh >> 1) * ref_stride;
for (idx = 0; idx < search_height; ++idx) {
- vbuf[idx] = vpx_int_pro_col(ref_buf, bw) >> norm_factor;
+ vbuf[idx] = aom_int_pro_col(ref_buf, bw) >> norm_factor;
ref_buf += ref_stride;
}
// Set up src 1-D reference set
for (idx = 0; idx < bw; idx += 16) {
src_buf = x->plane[0].src.buf + idx;
- vpx_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh);
+ aom_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh);
}
src_buf = x->plane[0].src.buf;
for (idx = 0; idx < bh; ++idx) {
- src_vbuf[idx] = vpx_int_pro_col(src_buf, bw) >> norm_factor;
+ src_vbuf[idx] = aom_int_pro_col(src_buf, bw) >> norm_factor;
src_buf += src_stride;
}
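
For reference, each aom_int_pro_col call above computes one integral projection sample: the sum of a pixel row, later scaled by norm_factor. A plain-C sketch (the library versions are SIMD-accelerated):

    static int16_t int_pro_col_sketch(const uint8_t *ref, int width) {
      int idx;
      int16_t sum = 0;
      /* Project one 2-D pixel row onto a single 1-D sample. */
      for (idx = 0; idx < width; ++idx) sum += ref[idx];
      return sum;
    }

Matching source against reference then needs only two cheap 1-D searches over these projections instead of a full 2-D block search.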
@@ -1803,17 +1803,17 @@
/* do_refine: If the last step (1-away) of the n-step search doesn't pick
   the center point as the best match, we will do a final 1-away diamond
   refining search */

-static int full_pixel_diamond(VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+static int full_pixel_diamond(AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
int step_param, int sadpb, int further_steps,
int do_refine, int *cost_list,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv) {
MV temp_mv;
int thissme, n, num00 = 0;
int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
step_param, sadpb, &n, fn_ptr, ref_mv);
if (bestsme < INT_MAX)
- bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+ bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
x->best_mv.as_mv = temp_mv;
// If there won't be more n-step search, check to see if refining search is
@@ -1830,7 +1830,7 @@
step_param + n, sadpb, &num00, fn_ptr,
ref_mv);
if (thissme < INT_MAX)
- thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+ thissme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
// check to see if refining search is needed.
if (num00 > further_steps - n) do_refine = 0;
@@ -1846,10 +1846,10 @@
if (do_refine) {
const int search_range = 8;
MV best_mv = x->best_mv.as_mv;
- thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
- ref_mv);
+ thissme = av1_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+ ref_mv);
if (thissme < INT_MAX)
- thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
+ thissme = av1_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
if (thissme < bestsme) {
bestsme = thissme;
x->best_mv.as_mv = best_mv;
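
The refining search invoked above walks the four 1-away neighbors of the current best MV, moving and repeating until no neighbor improves or search_range steps are spent. A sketch with a hypothetical sad_at callback in place of the SAD-plus-mvcost evaluation:

    static void refine_sketch(MV *best_mv, int search_range,
                              unsigned int (*sad_at)(const MV *)) {
      static const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
      unsigned int best_sad = sad_at(best_mv);
      int i, j;
      for (i = 0; i < search_range; ++i) {
        int best_site = -1;
        for (j = 0; j < 4; ++j) {
          const MV mv = { best_mv->row + neighbors[j].row,
                          best_mv->col + neighbors[j].col };
          const unsigned int sad = sad_at(&mv);
          if (sad < best_sad) {
            best_sad = sad;
            best_site = j;
          }
        }
        if (best_site == -1) break; /* local minimum reached */
        best_mv->row += neighbors[best_site].row;
        best_mv->col += neighbors[best_site].col;
      }
    }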
@@ -1868,10 +1868,10 @@
#define MIN_INTERVAL 1
// Runs a limited-range exhaustive mesh search using a pattern set
// according to the encode speed profile.
-static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
+static int full_pixel_exhaustive(AV1_COMP *cpi, MACROBLOCK *x,
const MV *centre_mv_full, int sadpb,
int *cost_list,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv) {
const SPEED_FEATURES *const sf = &cpi->sf;
MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
@@ -1894,9 +1894,9 @@
// Check size of proposed first range against magnitude of the centre
// value used as a starting point.
- range = VPXMAX(range, (5 * VPXMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
- range = VPXMIN(range, MAX_RANGE);
- interval = VPXMAX(interval, range / baseline_interval_divisor);
+ range = AOMMAX(range, (5 * AOMMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
+ range = AOMMIN(range, MAX_RANGE);
+ interval = AOMMAX(interval, range / baseline_interval_divisor);
// initial search
bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
@@ -1916,7 +1916,7 @@
}
if (bestsme < INT_MAX)
- bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+ bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
*dst_mv = temp_mv;
// Return cost list.
@@ -1926,18 +1926,18 @@
return bestsme;
}
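
Each call to exhuastive_mesh_search above is one pass of a dense grid scan; full_pixel_exhaustive then re-meshes at finer intervals around the winner. One pass reduces to the following (hypothetical callback again):

    static void mesh_pass_sketch(const MV *center, int range, int step,
                                 MV *best_mv, unsigned int *best_sad,
                                 unsigned int (*sad_at)(const MV *)) {
      int r, c;
      /* Scan a (2 * range + 1)^2 neighborhood at `step` spacing. */
      for (r = -range; r <= range; r += step) {
        for (c = -range; c <= range; c += step) {
          const MV mv = { center->row + r, center->col + c };
          const unsigned int sad = sad_at(&mv);
          if (sad < *best_sad) {
            *best_sad = sad;
            *best_mv = mv;
          }
        }
      }
    }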
-int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv) {
+int av1_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
+ int sad_per_bit, int distance,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv) {
int r, c;
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
- const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
- const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
- const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+ const int row_min = AOMMAX(ref_mv->row - distance, x->mv_row_min);
+ const int row_max = AOMMIN(ref_mv->row + distance, x->mv_row_max);
+ const int col_min = AOMMAX(ref_mv->col - distance, x->mv_col_min);
+ const int col_max = AOMMIN(ref_mv->col + distance, x->mv_col_max);
const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
int best_sad =
fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
@@ -1961,18 +1961,18 @@
return best_sad;
}
-int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv) {
+int av1_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
+ int sad_per_bit, int distance,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv) {
int r;
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
- const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
- const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
- const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+ const int row_min = AOMMAX(ref_mv->row - distance, x->mv_row_min);
+ const int row_max = AOMMIN(ref_mv->row + distance, x->mv_row_max);
+ const int col_min = AOMMAX(ref_mv->col - distance, x->mv_col_min);
+ const int col_max = AOMMIN(ref_mv->col + distance, x->mv_col_max);
const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
unsigned int best_sad =
fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
@@ -2027,18 +2027,18 @@
return best_sad;
}
-int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv) {
+int av1_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
+ int sad_per_bit, int distance,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv) {
int r;
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
- const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
- const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
- const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+ const int row_min = AOMMAX(ref_mv->row - distance, x->mv_row_min);
+ const int row_max = AOMMIN(ref_mv->row + distance, x->mv_row_max);
+ const int col_min = AOMMAX(ref_mv->col - distance, x->mv_col_min);
+ const int col_max = AOMMIN(ref_mv->col + distance, x->mv_col_max);
const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
unsigned int best_sad =
fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
@@ -2117,10 +2117,10 @@
return best_sad;
}
-int vp10_refining_search_sad(MACROBLOCK *x, MV *ref_mv, int error_per_bit,
- int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv) {
+int av1_refining_search_sad(MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+ int search_range,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv) {
const MACROBLOCKD *const xd = &x->e_mbd;
const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
const struct buf_2d *const what = &x->plane[0].src;
@@ -2193,10 +2193,9 @@
// This function is called when we do joint motion search in comp_inter_inter
// mode.
-int vp10_refining_search_8p_c(MACROBLOCK *x, int error_per_bit,
- int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, const uint8_t *second_pred) {
+int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, const uint8_t *second_pred) {
const MV neighbors[8] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
{ -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -2242,10 +2241,10 @@
}
#define MIN_EX_SEARCH_LIMIT 128
-static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
+static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
const SPEED_FEATURES *const sf = &cpi->sf;
const int max_ex =
- VPXMAX(MIN_EX_SEARCH_LIMIT,
+ AOMMAX(MIN_EX_SEARCH_LIMIT,
(*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
return sf->allow_exhaustive_searches &&
@@ -2253,13 +2252,13 @@
(*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
}
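
As a worked example of this gate (numbers illustrative): with sf->max_exaustive_pct = 25 and *x->m_search_count_ptr = 1000, max_ex = AOMMAX(128, 250) = 250, so exhaustive searches remain allowed until 250 have been spent; for fewer than 512 regular searches the MIN_EX_SEARCH_LIMIT floor of 128 dominates.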
-int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
- MV *mvp_full, int step_param, int error_per_bit,
- int *cost_list, const MV *ref_mv, int var_max,
- int rd) {
+int av1_full_pixel_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ MV *mvp_full, int step_param, int error_per_bit,
+ int *cost_list, const MV *ref_mv, int var_max,
+ int rd) {
const SPEED_FEATURES *const sf = &cpi->sf;
const SEARCH_METHODS method = sf->mv.search_method;
- vpx_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
+ aom_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
int var = 0;
if (cost_list) {
@@ -2283,8 +2282,8 @@
cost_list, fn_ptr, 1, ref_mv);
break;
case HEX:
- var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv);
+ var = av1_hex_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+ fn_ptr, 1, ref_mv);
break;
case SQUARE:
var = square_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
@@ -2326,7 +2325,7 @@
}
if (method != NSTEP && rd && var < var_max)
- var = vp10_get_mvpred_var(x, &x->best_mv.as_mv, ref_mv, fn_ptr, 1);
+ var = av1_get_mvpred_var(x, &x->best_mv.as_mv, ref_mv, fn_ptr, 1);
return var;
}
@@ -2382,10 +2381,10 @@
v = INT_MAX; \
}
-int vp10_find_best_masked_sub_pixel_tree(
+int av1_find_best_masked_sub_pixel_tree(
const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
const MV *ref_mv, int allow_hp, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+ const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second) {
const uint8_t *const z = x->plane[0].src.buf;
@@ -2408,10 +2407,10 @@
int br = bestmv->row * 8;
int bc = bestmv->col * 8;
int hstep = 4;
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
int tr = br;
int tc = bc;
@@ -2445,7 +2444,7 @@
tc = bc;
}
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
hstep >>= 1;
FIRST_LEVEL_CHECKS;
if (eighthiters > 1) {
@@ -2471,7 +2470,7 @@
static unsigned int setup_masked_center_error(
const uint8_t *mask, int mask_stride, const MV *bestmv, const MV *ref_mv,
- int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+ int error_per_bit, const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride, const uint8_t *const y,
int y_stride, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
int *distortion) {
@@ -2485,16 +2484,16 @@
static int upsampled_masked_pref_error(const MACROBLOCKD *xd,
const uint8_t *mask, int mask_stride,
- const vpx_variance_fn_ptr_t *vfp,
+ const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src,
const int src_stride,
const uint8_t *const y, int y_stride,
int w, int h, unsigned int *sse) {
unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
- vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+ aom_highbd_upsampled_pred(pred16, w, h, y, y_stride);
besterr = vfp->mvf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride, mask,
mask_stride, sse);
@@ -2503,11 +2502,11 @@
#else
DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
(void)xd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_upsampled_pred(pred, w, h, y, y_stride);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_upsampled_pred(pred, w, h, y, y_stride);
besterr = vfp->mvf(pred, w, src, src_stride, mask, mask_stride, sse);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
return besterr;
@@ -2516,7 +2515,7 @@
static unsigned int upsampled_setup_masked_center_error(
const MACROBLOCKD *xd, const uint8_t *mask, int mask_stride,
const MV *bestmv, const MV *ref_mv, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, const uint8_t *const src,
+ const aom_variance_fn_ptr_t *vfp, const uint8_t *const src,
const int src_stride, const uint8_t *const y, int y_stride, int w, int h,
int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
int *distortion) {
@@ -2528,10 +2527,10 @@
return besterr;
}
-int vp10_find_best_masked_sub_pixel_tree_up(
- VP10_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+int av1_find_best_masked_sub_pixel_tree_up(
+ AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
- int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+ int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, int is_second, int use_upsampled_ref) {
const uint8_t *const z = x->plane[0].src.buf;
@@ -2551,10 +2550,10 @@
int hstep = 4;
int iter;
int round = 3 - forced_stop;
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
int tr = br;
int tc = bc;
const MV *search_step = search_step_table;
@@ -2580,7 +2579,7 @@
y_stride = pd->pre[is_second].stride;
offset = bestmv->row * y_stride + bestmv->col;
- if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+ if (!(allow_hp && av1_use_mv_hp(ref_mv)))
if (round == 3) round = 2;
bestmv->row *= 8;
@@ -2717,7 +2716,7 @@
static int get_masked_mvpred_var(const MACROBLOCK *x, const uint8_t *mask,
int mask_stride, const MV *best_mv,
const MV *center_mv,
- const vpx_variance_fn_ptr_t *vfp,
+ const aom_variance_fn_ptr_t *vfp,
int use_mvcost, int is_second) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
@@ -2735,7 +2734,7 @@
int masked_refining_search_sad(const MACROBLOCK *x, const uint8_t *mask,
int mask_stride, MV *ref_mv, int error_per_bit,
int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, int is_second) {
const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -2782,7 +2781,7 @@
const search_site_config *cfg,
const uint8_t *mask, int mask_stride, MV *ref_mv,
MV *best_mv, int search_param, int sad_per_bit,
- int *num00, const vpx_variance_fn_ptr_t *fn_ptr,
+ int *num00, const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, int is_second) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
@@ -2867,13 +2866,12 @@
return best_sad;
}
-int vp10_masked_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
- const uint8_t *mask, int mask_stride,
- MV *mvp_full, int step_param, int sadpb,
- int further_steps, int do_refine,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv,
- int is_second) {
+int av1_masked_full_pixel_diamond(const AV1_COMP *cpi, MACROBLOCK *x,
+ const uint8_t *mask, int mask_stride,
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv, int is_second) {
MV temp_mv;
int thissme, n, num00 = 0;
int bestsme = masked_diamond_search_sad(x, &cpi->ss_cfg, mask, mask_stride,
@@ -2980,7 +2978,7 @@
static unsigned int setup_obmc_center_error(
const int32_t *mask, const MV *bestmv, const MV *ref_mv, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
+ const aom_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
const uint8_t *const y, int y_stride, int offset, int *mvjcost,
int *mvcost[2], unsigned int *sse1, int *distortion) {
unsigned int besterr;
@@ -2991,15 +2989,15 @@
}
static int upsampled_obmc_pref_error(const MACROBLOCKD *xd, const int32_t *mask,
- const vpx_variance_fn_ptr_t *vfp,
+ const aom_variance_fn_ptr_t *vfp,
const int32_t *const wsrc,
const uint8_t *const y, int y_stride,
int w, int h, unsigned int *sse) {
unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
- vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+ aom_highbd_upsampled_pred(pred16, w, h, y, y_stride);
besterr = vfp->ovf(CONVERT_TO_BYTEPTR(pred16), w, wsrc, mask, sse);
} else {
@@ -3007,11 +3005,11 @@
#else
DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
(void)xd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vpx_upsampled_pred(pred, w, h, y, y_stride);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ aom_upsampled_pred(pred, w, h, y, y_stride);
besterr = vfp->ovf(pred, w, wsrc, mask, sse);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
return besterr;
@@ -3019,7 +3017,7 @@
static unsigned int upsampled_setup_obmc_center_error(
const MACROBLOCKD *xd, const int32_t *mask, const MV *bestmv,
- const MV *ref_mv, int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+ const MV *ref_mv, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
const int32_t *const wsrc, const uint8_t *const y, int y_stride, int w,
int h, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
int *distortion) {
@@ -3030,10 +3028,10 @@
return besterr;
}
-int vp10_find_best_obmc_sub_pixel_tree_up(
- VP10_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc, const int32_t *mask,
+int av1_find_best_obmc_sub_pixel_tree_up(
+ AV1_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc, const int32_t *mask,
int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
- int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+ int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, int is_second, int use_upsampled_ref) {
const int *const z = wsrc;
@@ -3052,10 +3050,10 @@
int hstep = 4;
int iter;
int round = 3 - forced_stop;
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
int tr = br;
int tc = bc;
const MV *search_step = search_step_table;
@@ -3081,7 +3079,7 @@
y_stride = pd->pre[is_second].stride;
offset = bestmv->row * y_stride + bestmv->col;
- if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+ if (!(allow_hp && av1_use_mv_hp(ref_mv)))
if (round == 3) round = 2;
bestmv->row *= 8;
@@ -3213,7 +3211,7 @@
static int get_obmc_mvpred_var(const MACROBLOCK *x, const int32_t *wsrc,
const int32_t *mask, const MV *best_mv,
const MV *center_mv,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost,
int is_second) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
@@ -3230,7 +3228,7 @@
int obmc_refining_search_sad(const MACROBLOCK *x, const int32_t *wsrc,
const int32_t *mask, MV *ref_mv, int error_per_bit,
int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, int is_second) {
const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -3274,7 +3272,7 @@
const int32_t *wsrc, const int32_t *mask,
MV *ref_mv, MV *best_mv, int search_param,
int sad_per_bit, int *num00,
- const vpx_variance_fn_ptr_t *fn_ptr,
+ const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, int is_second) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
@@ -3355,12 +3353,12 @@
return best_sad;
}
-int vp10_obmc_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
- const int32_t *wsrc, const int32_t *mask,
- MV *mvp_full, int step_param, int sadpb,
- int further_steps, int do_refine,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv, int is_second) {
+int av1_obmc_full_pixel_diamond(const AV1_COMP *cpi, MACROBLOCK *x,
+ const int32_t *wsrc, const int32_t *mask,
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv, int is_second) {
MV temp_mv;
int thissme, n, num00 = 0;
int bestsme =
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index d26b9bd..3c57139 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_MCOMP_H_
-#define VP10_ENCODER_MCOMP_H_
+#ifndef AV1_ENCODER_MCOMP_H_
+#define AV1_ENCODER_MCOMP_H_
#include "av1/encoder/block.h"
#include "aom_dsp/variance.h"
@@ -28,7 +28,7 @@
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
// Allowed motion vector pixel distance outside image border
// for Block_16x16
-#define BORDER_MV_PIXELS_B16 (16 + VPX_INTERP_EXTEND)
+#define BORDER_MV_PIXELS_B16 (16 + AOM_INTERP_EXTEND)
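
Assuming AOM_INTERP_EXTEND keeps the value its VPX predecessor used (4), BORDER_MV_PIXELS_B16 evaluates to 16 + 4 = 20, i.e. a 16x16 block's motion vector may reach up to 20 pixels beyond the image border.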
// motion search site
typedef struct search_site {
@@ -42,115 +42,114 @@
int searches_per_step;
} search_site_config;
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride);
+void av1_init3smotion_compensation(search_site_config *cfg, int stride);
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
- int *mvcost[2], int weight);
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv);
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int weight);
// Utility to compute variance + MV rate cost for a given MV
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
- const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost);
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
- const MV *center_mv, const uint8_t *second_pred,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost);
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
+ int use_mvcost);
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const uint8_t *second_pred,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost);
-struct VP10_COMP;
+struct AV1_COMP;
struct SPEED_FEATURES;
-int vp10_init_search_range(int size);
+int av1_init_search_range(int size);
-int vp10_refining_search_sad(struct macroblock *x, struct mv *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const struct mv *center_mv);
+int av1_refining_search_sad(struct macroblock *x, struct mv *ref_mv,
+ int sad_per_bit, int distance,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const struct mv *center_mv);
// Runs sequence of diamond searches in smaller steps for RD.
-int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
- MV *mvp_full, int step_param, int sadpb,
- int further_steps, int do_refine, int *cost_list,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv);
+int av1_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine, int *cost_list,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv);
// Perform integral projection based motion estimation.
-unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
- MACROBLOCK *x, BLOCK_SIZE bsize,
- int mi_row, int mi_col);
+unsigned int av1_int_pro_motion_estimation(const struct AV1_COMP *cpi,
+ MACROBLOCK *x, BLOCK_SIZE bsize,
+ int mi_row, int mi_col);
-int vp10_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
- int sad_per_bit, int do_init_search, int *cost_list,
- const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
- const MV *center_mv);
+int av1_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const aom_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv);
typedef int(fractional_mv_step_fp)(
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
+ const aom_variance_fn_ptr_t *vfp,
int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
int h, int use_upsampled_ref);
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_more;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_evenmore;
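
Because fractional_mv_step_fp is declared as a function type rather than a pointer type, call sites select a routine by taking a pointer to one of the four externs above. A minimal sketch (the selection logic shown is illustrative):

    fractional_mv_step_fp *find_fractional_mv_step;
    /* Cheapest to most thorough, e.g. chosen by encoder speed: */
    find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_evenmore;
    /* ... */
    find_fractional_mv_step = av1_find_best_sub_pixel_tree;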
-typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv);
+typedef int (*av1_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+ int sad_per_bit, int distance,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv);
-typedef int (*vp10_diamond_search_fn_t)(
+typedef int (*av1_diamond_search_fn_t)(
MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
int search_param, int sad_per_bit, int *num00,
- const vpx_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
+ const aom_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
-int vp10_refining_search_8p_c(MACROBLOCK *x, int error_per_bit,
- int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, const uint8_t *second_pred);
+int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, const uint8_t *second_pred);
-struct VP10_COMP;
+struct AV1_COMP;
-int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, MV *mvp_full, int step_param,
- int error_per_bit, int *cost_list, const MV *ref_mv,
- int var_max, int rd);
+int av1_full_pixel_search(struct AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ MV *mvp_full, int step_param, int error_per_bit,
+ int *cost_list, const MV *ref_mv, int var_max,
+ int rd);
#if CONFIG_EXT_INTER
-int vp10_find_best_masked_sub_pixel_tree(
+int av1_find_best_masked_sub_pixel_tree(
const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
const MV *ref_mv, int allow_hp, int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+ const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second);
-int vp10_find_best_masked_sub_pixel_tree_up(
- struct VP10_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+int av1_find_best_masked_sub_pixel_tree_up(
+ struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
- int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+ int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, int is_second, int use_upsampled_ref);
-int vp10_masked_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
- const uint8_t *mask, int mask_stride,
- MV *mvp_full, int step_param, int sadpb,
- int further_steps, int do_refine,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv, int is_second);
+int av1_masked_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ const uint8_t *mask, int mask_stride,
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv, int is_second);
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
-int vp10_obmc_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
- const int32_t *wsrc, const int32_t *mask,
- MV *mvp_full, int step_param, int sadpb,
- int further_steps, int do_refine,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv, int is_second);
-int vp10_find_best_obmc_sub_pixel_tree_up(
- struct VP10_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc,
+int av1_obmc_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ const int32_t *wsrc, const int32_t *mask,
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine,
+ const aom_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv, int is_second);
+int av1_find_best_obmc_sub_pixel_tree_up(
+ struct AV1_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc,
const int32_t *mask, int mi_row, int mi_col, MV *bestmv, const MV *ref_mv,
- int allow_hp, int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+ int allow_hp, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
int forced_stop, int iters_per_step, int *mvjcost, int *mvcost[2],
int *distortion, unsigned int *sse1, int is_second, int use_upsampled_ref);
#endif // CONFIG_OBMC
@@ -158,4 +157,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_MCOMP_H_
+#endif // AV1_ENCODER_MCOMP_H_
diff --git a/av1/encoder/mips/msa/error_msa.c b/av1/encoder/mips/msa/error_msa.c
index 71c5ad3..ad422f1 100644
--- a/av1/encoder/mips/msa/error_msa.c
+++ b/av1/encoder/mips/msa/error_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \
@@ -86,9 +86,9 @@
BLOCK_ERROR_BLOCKSIZE_MSA(1024)
/* clang-format on */
-int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
- const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
- int64_t *ssz) {
+int64_t av1_block_error_msa(const tran_low_t *coeff_ptr,
+ const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
+ int64_t *ssz) {
int64_t err;
const int16_t *coeff = (const int16_t *)coeff_ptr;
const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr;
@@ -99,7 +99,7 @@
case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
default:
- err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
+ err = av1_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
break;
}
diff --git a/av1/encoder/mips/msa/fdct16x16_msa.c b/av1/encoder/mips/msa/fdct16x16_msa.c
index cda2138..252d118 100644
--- a/av1/encoder/mips/msa/fdct16x16_msa.c
+++ b/av1/encoder/mips/msa/fdct16x16_msa.c
@@ -403,8 +403,8 @@
ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
}
-void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
- int32_t tx_type) {
+void av1_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+ int32_t tx_type) {
DECLARE_ALIGNED(32, int16_t, tmp[256]);
DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);
diff --git a/av1/encoder/mips/msa/fdct4x4_msa.c b/av1/encoder/mips/msa/fdct4x4_msa.c
index a3731c3..26087e4 100644
--- a/av1/encoder/mips/msa/fdct4x4_msa.c
+++ b/av1/encoder/mips/msa/fdct4x4_msa.c
@@ -13,8 +13,8 @@
#include "av1/common/enums.h"
#include "av1/encoder/mips/msa/fdct_msa.h"
-void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
- int32_t src_stride) {
+void av1_fwht4x4_msa(const int16_t *input, int16_t *output,
+ int32_t src_stride) {
v8i16 in0, in1, in2, in3, in4;
LD_SH4(input, src_stride, in0, in1, in2, in3);
@@ -45,8 +45,8 @@
ST4x2_UB(in2, output + 12, 4);
}
-void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
- int32_t tx_type) {
+void av1_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
+ int32_t tx_type) {
v8i16 in0, in1, in2, in3;
LD_SH4(input, stride, in0, in1, in2, in3);
@@ -67,24 +67,24 @@
switch (tx_type) {
case DCT_DCT:
- VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
case ADST_DCT:
- VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
case DCT_ADST:
- VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
case ADST_ADST:
- VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
default: assert(0); break;
}
diff --git a/av1/encoder/mips/msa/fdct8x8_msa.c b/av1/encoder/mips/msa/fdct8x8_msa.c
index 3b6532a..aa759cc 100644
--- a/av1/encoder/mips/msa/fdct8x8_msa.c
+++ b/av1/encoder/mips/msa/fdct8x8_msa.c
@@ -13,8 +13,8 @@
#include "av1/common/enums.h"
#include "av1/encoder/mips/msa/fdct_msa.h"
-void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
- int32_t tx_type) {
+void av1_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
+ int32_t tx_type) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
@@ -23,35 +23,35 @@
switch (tx_type) {
case DCT_DCT:
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case ADST_DCT:
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case DCT_ADST:
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case ADST_ADST:
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
default: assert(0); break;
diff --git a/av1/encoder/mips/msa/fdct_msa.h b/av1/encoder/mips/msa/fdct_msa.h
index 07471d0..7373659 100644
--- a/av1/encoder/mips/msa/fdct_msa.h
+++ b/av1/encoder/mips/msa/fdct_msa.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
-#define VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#ifndef AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
+#define AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
#include "aom_dsp/mips/fwd_txfm_msa.h"
#include "aom_dsp/mips/txfm_macros_msa.h"
#include "aom_ports/mem.h"
-#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
+#define AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
{ \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
@@ -79,7 +79,7 @@
out5 = -out5; \
}
-#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \
+#define AOM_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
@@ -113,4 +113,4 @@
PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
out0, out1, out2, out3); \
}
-#endif // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#endif // AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
diff --git a/av1/encoder/mips/msa/temporal_filter_msa.c b/av1/encoder/mips/msa/temporal_filter_msa.c
index 4d60d37..17b7b82 100644
--- a/av1/encoder/mips/msa/temporal_filter_msa.c
+++ b/av1/encoder/mips/msa/temporal_filter_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
@@ -265,11 +265,11 @@
}
}
-void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
- uint8_t *frame2_ptr, uint32_t blk_w,
- uint32_t blk_h, int32_t strength,
- int32_t filt_wgt, uint32_t *accu,
- uint16_t *cnt) {
+void av1_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
+ uint8_t *frame2_ptr, uint32_t blk_w,
+ uint32_t blk_h, int32_t strength,
+ int32_t filt_wgt, uint32_t *accu,
+ uint16_t *cnt) {
if (8 == (blk_w * blk_h)) {
temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
filt_wgt, accu, cnt);
@@ -277,7 +277,7 @@
temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
filt_wgt, accu, cnt);
} else {
- vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
- strength, filt_wgt, accu, cnt);
+ av1_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
+ strength, filt_wgt, accu, cnt);
}
}
diff --git a/av1/encoder/palette.c b/av1/encoder/palette.c
index 74f91b7..8da52f7 100644
--- a/av1/encoder/palette.c
+++ b/av1/encoder/palette.c
@@ -22,8 +22,8 @@
return dist;
}
-void vp10_calc_indices(const float *data, const float *centroids,
- uint8_t *indices, int n, int k, int dim) {
+void av1_calc_indices(const float *data, const float *centroids,
+ uint8_t *indices, int n, int k, int dim) {
int i, j;
for (i = 0; i < n; ++i) {
float min_dist = calc_dist(data + i * dim, centroids, dim);
@@ -93,14 +93,14 @@
return dist;
}
-void vp10_k_means(const float *data, float *centroids, uint8_t *indices, int n,
- int k, int dim, int max_itr) {
+void av1_k_means(const float *data, float *centroids, uint8_t *indices, int n,
+ int k, int dim, int max_itr) {
int i;
float this_dist;
float pre_centroids[2 * PALETTE_MAX_SIZE];
uint8_t pre_indices[MAX_SB_SQUARE];
- vp10_calc_indices(data, centroids, indices, n, k, dim);
+ av1_calc_indices(data, centroids, indices, n, k, dim);
this_dist = calc_total_dist(data, centroids, indices, n, k, dim);
for (i = 0; i < max_itr; ++i) {
@@ -109,7 +109,7 @@
memcpy(pre_indices, indices, sizeof(pre_indices[0]) * n);
calc_centroids(data, centroids, indices, n, k, dim);
- vp10_calc_indices(data, centroids, indices, n, k, dim);
+ av1_calc_indices(data, centroids, indices, n, k, dim);
this_dist = calc_total_dist(data, centroids, indices, n, k, dim);
if (this_dist > pre_dist) {
@@ -128,7 +128,7 @@
return (fa > fb) - (fa < fb);
}
-int vp10_remove_duplicates(float *centroids, int num_centroids) {
+int av1_remove_duplicates(float *centroids, int num_centroids) {
int num_unique; // number of unique centroids
int i;
qsort(centroids, num_centroids, sizeof(*centroids), float_comparer);
@@ -142,7 +142,7 @@
return num_unique;
}
-int vp10_count_colors(const uint8_t *src, int stride, int rows, int cols) {
+int av1_count_colors(const uint8_t *src, int stride, int rows, int cols) {
int n = 0, r, c, i, val_count[256];
uint8_t val;
memset(val_count, 0, sizeof(val_count));
@@ -163,9 +163,9 @@
return n;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-int vp10_count_colors_highbd(const uint8_t *src8, int stride, int rows,
- int cols, int bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
+int av1_count_colors_highbd(const uint8_t *src8, int stride, int rows, int cols,
+ int bit_depth) {
int n = 0, r, c, i;
uint16_t val;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -188,4 +188,4 @@
return n;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/palette.h b/av1/encoder/palette.h
index fbbb39c..e570e4d 100644
--- a/av1/encoder/palette.h
+++ b/av1/encoder/palette.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_PALETTE_H_
-#define VP10_ENCODER_PALETTE_H_
+#ifndef AV1_ENCODER_PALETTE_H_
+#define AV1_ENCODER_PALETTE_H_
#include "av1/common/blockd.h"
@@ -17,30 +17,30 @@
extern "C" {
#endif
-void vp10_calc_indices(const float *data, const float *centroids,
- uint8_t *indices, int n, int k, int dim);
+void av1_calc_indices(const float *data, const float *centroids,
+ uint8_t *indices, int n, int k, int dim);
// Given 'data' of size 'n' and initial guess of 'centroids' of size 'k x dim',
// runs up to 'max_itr' iterations of k-means algorithm to get updated
// 'centroids' and the centroid 'indices' for elements in 'data'.
// Note: the output centroids are rounded off to nearest integers.
-void vp10_k_means(const float *data, float *centroids, uint8_t *indices, int n,
- int k, int dim, int max_itr);
+void av1_k_means(const float *data, float *centroids, uint8_t *indices, int n,
+ int k, int dim, int max_itr);
// Given a list of centroids, returns the unique number of centroids 'k', and
// puts these unique centroids in first 'k' indices of 'centroids' array.
// Ideally, the centroids should be rounded to integers before calling this
// method.
-int vp10_remove_duplicates(float *centroids, int num_centroids);
+int av1_remove_duplicates(float *centroids, int num_centroids);
-int vp10_count_colors(const uint8_t *src, int stride, int rows, int cols);
-#if CONFIG_VP9_HIGHBITDEPTH
-int vp10_count_colors_highbd(const uint8_t *src8, int stride, int rows,
- int cols, int bit_depth);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+int av1_count_colors(const uint8_t *src, int stride, int rows, int cols);
+#if CONFIG_AOM_HIGHBITDEPTH
+int av1_count_colors_highbd(const uint8_t *src8, int stride, int rows, int cols,
+ int bit_depth);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif /* VP10_ENCODER_PALETTE_H_ */
+#endif /* AV1_ENCODER_PALETTE_H_ */
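
The palette search renamed above is a straight Lloyd iteration: av1_calc_indices assigns each pixel to its nearest centroid, calc_centroids re-averages, and av1_k_means alternates the two for up to max_itr rounds, rolling back whenever total distortion worsens. A minimal standalone sketch of the two alternating steps (helper names here are illustrative, not the encoder's internals):

/* k-means sketch mirroring the av1_k_means flow above. */
#include <stdint.h>

static float sq_dist(const float *a, const float *b, int dim) {
  float d = 0.f;
  for (int i = 0; i < dim; ++i) d += (a[i] - b[i]) * (a[i] - b[i]);
  return d;
}

/* Assignment step: nearest centroid per point (cf. av1_calc_indices). */
static void assign_indices(const float *data, const float *centroids,
                           uint8_t *indices, int n, int k, int dim) {
  for (int i = 0; i < n; ++i) {
    int best = 0;
    float best_d = sq_dist(data + i * dim, centroids, dim);
    for (int j = 1; j < k; ++j) {
      const float d = sq_dist(data + i * dim, centroids + j * dim, dim);
      if (d < best_d) { best_d = d; best = j; }
    }
    indices[i] = (uint8_t)best;
  }
}

/* Update step: mean of the points assigned to each centroid; an empty
 * cluster keeps its previous centroid. */
static void update_centroids(const float *data, float *centroids,
                             const uint8_t *indices, int n, int k, int dim) {
  for (int j = 0; j < k; ++j) {
    int count = 0;
    float sum[8] = { 0 };  /* assumes dim <= 8 for the sketch */
    for (int i = 0; i < n; ++i) {
      if (indices[i] != j) continue;
      ++count;
      for (int d = 0; d < dim; ++d) sum[d] += data[i * dim + d];
    }
    if (count)
      for (int d = 0; d < dim; ++d) centroids[j * dim + d] = sum[d] / count;
  }
}

av1_remove_duplicates then sorts the resulting centroids and compacts equal entries, which is why the header above asks for integer-rounded centroids before calling it.
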
diff --git a/av1/encoder/pickdering.c b/av1/encoder/pickdering.c
index 91e9b54..b5db43c 100644
--- a/av1/encoder/pickdering.c
+++ b/av1/encoder/pickdering.c
@@ -10,12 +10,12 @@
#include <string.h>
-#include "./vpx_scale_rtcd.h"
+#include "./aom_scale_rtcd.h"
#include "av1/common/dering.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/reconinter.h"
#include "av1/encoder/encoder.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
static double compute_dist(int16_t *x, int xstride, int16_t *y, int ystride,
int nhb, int nvb, int coeff_shift) {
@@ -32,8 +32,8 @@
return sum / (double)(1 << 2 * coeff_shift);
}
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
- VP10_COMMON *cm, MACROBLOCKD *xd) {
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+ AV1_COMMON *cm, MACROBLOCKD *xd) {
int r, c;
int sbr, sbc;
int nhsb, nvsb;
@@ -52,11 +52,11 @@
int best_level;
int global_level;
double best_tot_mse = 1e15;
- int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
- src = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
- ref_coeff = vpx_malloc(sizeof(*ref_coeff) * cm->mi_rows * cm->mi_cols * 64);
- bskip = vpx_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+ int coeff_shift = AOMMAX(cm->bit_depth - 8, 0);
+ src = aom_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
+ ref_coeff = aom_malloc(sizeof(*ref_coeff) * cm->mi_rows * cm->mi_cols * 64);
+ bskip = aom_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
+ av1_setup_dst_planes(xd->plane, frame, 0, 0);
for (pli = 0; pli < 3; pli++) {
dec[pli] = xd->plane[pli].subsampling_x;
bsize[pli] = 8 >> dec[pli];
@@ -64,7 +64,7 @@
stride = bsize[0] * cm->mi_cols;
for (r = 0; r < bsize[0] * cm->mi_rows; ++r) {
for (c = 0; c < bsize[0] * cm->mi_cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
src[r * stride + c] = CONVERT_TO_SHORTPTR(
xd->plane[0].dst.buf)[r * xd->plane[0].dst.stride + c];
@@ -75,7 +75,7 @@
src[r * stride + c] =
xd->plane[0].dst.buf[r * xd->plane[0].dst.stride + c];
ref_coeff[r * stride + c] = ref->y_buffer[r * ref->y_stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
}
@@ -89,15 +89,15 @@
}
nvsb = (cm->mi_rows + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
nhsb = (cm->mi_cols + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
- mse = vpx_malloc(nvsb * nhsb * sizeof(*mse));
+ mse = aom_malloc(nvsb * nhsb * sizeof(*mse));
for (sbr = 0; sbr < nvsb; sbr++) {
for (sbc = 0; sbc < nhsb; sbc++) {
int best_mse = 1000000000;
int nvb, nhb;
int16_t dst[MAX_MIB_SIZE * MAX_MIB_SIZE * 8 * 8];
best_level = 0;
- nhb = VPXMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
- nvb = VPXMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
+ nhb = AOMMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
+ nvb = AOMMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
for (level = 0; level < 64; level++) {
int threshold;
threshold = level << coeff_shift;
@@ -169,9 +169,9 @@
if (tot_mse[level] < tot_mse[best_level]) best_level = level;
}
#endif
- vpx_free(src);
- vpx_free(ref_coeff);
- vpx_free(bskip);
- vpx_free(mse);
+ aom_free(src);
+ aom_free(ref_coeff);
+ aom_free(bskip);
+ aom_free(mse);
return best_level;
}
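
av1_dering_search above is an exhaustive per-superblock search: for each superblock it filters with every strength level 0..63 (threshold = level << coeff_shift, so thresholds scale with bit depth), measures the MSE against the reference, and finally picks the level minimizing total distortion. The inner argmin, reduced to a sketch (compute_block_mse is a hypothetical stand-in for the filter-then-measure step):

#include <float.h>

#define NUM_DERING_LEVELS 64

static int pick_dering_level(double (*compute_block_mse)(int threshold),
                             int coeff_shift) {
  int best_level = 0;
  double best_mse = DBL_MAX;
  for (int level = 0; level < NUM_DERING_LEVELS; ++level) {
    const int threshold = level << coeff_shift;  /* scale for bit depth */
    const double mse = compute_block_mse(threshold);
    if (mse < best_mse) {
      best_mse = mse;
      best_level = level;
    }
  }
  return best_level;
}
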
diff --git a/av1/encoder/picklpf.c b/av1/encoder/picklpf.c
index c4e9b7d..12254bd 100644
--- a/av1/encoder/picklpf.c
+++ b/av1/encoder/picklpf.c
@@ -11,11 +11,11 @@
#include <assert.h>
#include <limits.h>
-#include "./vpx_scale_rtcd.h"
+#include "./aom_scale_rtcd.h"
#include "aom_dsp/psnr.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/loopfilter.h"
@@ -26,7 +26,7 @@
#include "av1/encoder/picklpf.h"
#include "av1/encoder/quantize.h"
-int vp10_get_max_filter_level(const VP10_COMP *cpi) {
+int av1_get_max_filter_level(const AV1_COMP *cpi) {
if (cpi->oxcf.pass == 2) {
return cpi->twopass.section_intra_rating > 8 ? MAX_LOOP_FILTER * 3 / 4
: MAX_LOOP_FILTER;
@@ -36,46 +36,46 @@
}
static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
- VP10_COMP *const cpi, int filt_level,
+ AV1_COMP *const cpi, int filt_level,
int partial_frame) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int64_t filt_err;
#if CONFIG_VAR_TX || CONFIG_EXT_PARTITION
- vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
- 1, partial_frame);
+ av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level, 1,
+ partial_frame);
#else
if (cpi->num_workers > 1)
- vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
- filt_level, 1, partial_frame, cpi->workers,
- cpi->num_workers, &cpi->lf_row_sync);
+ av1_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
+ filt_level, 1, partial_frame, cpi->workers,
+ cpi->num_workers, &cpi->lf_row_sync);
else
- vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
- 1, partial_frame);
+ av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+ 1, partial_frame);
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show);
+ filt_err = aom_highbd_get_y_sse(sd, cm->frame_to_show);
} else {
- filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+ filt_err = aom_get_y_sse(sd, cm->frame_to_show);
}
#else
- filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ filt_err = aom_get_y_sse(sd, cm->frame_to_show);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Re-instate the unfiltered frame
- vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+ aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
return filt_err;
}
-int vp10_search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
- int partial_frame, double *best_cost_ret) {
- const VP10_COMMON *const cm = &cpi->common;
+int av1_search_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+ int partial_frame, double *best_cost_ret) {
+ const AV1_COMMON *const cm = &cpi->common;
const struct loopfilter *const lf = &cm->lf;
const int min_filter_level = 0;
- const int max_filter_level = vp10_get_max_filter_level(cpi);
+ const int max_filter_level = av1_get_max_filter_level(cpi);
int filt_direction = 0;
int64_t best_err;
int filt_best;
@@ -92,15 +92,15 @@
memset(ss_err, 0xFF, sizeof(ss_err));
// Make a copy of the unfiltered / processed recon buffer
- vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+ aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
best_err = try_filter_frame(sd, cpi, filt_mid, partial_frame);
filt_best = filt_mid;
ss_err[filt_mid] = best_err;
while (filter_step > 0) {
- const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level);
- const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level);
+ const int filt_high = AOMMIN(filt_mid + filter_step, max_filter_level);
+ const int filt_low = AOMMAX(filt_mid - filter_step, min_filter_level);
// Bias against raising loop filter in favor of lowering it.
int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
@@ -159,9 +159,9 @@
}
#if !CONFIG_LOOP_RESTORATION
-void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
- LPF_PICK_METHOD method) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+ LPF_PICK_METHOD method) {
+ AV1_COMMON *const cm = &cpi->common;
struct loopfilter *const lf = &cm->lf;
lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
@@ -170,35 +170,35 @@
lf->filter_level = 0;
} else if (method >= LPF_PICK_FROM_Q) {
const int min_filter_level = 0;
- const int max_filter_level = vp10_get_max_filter_level(cpi);
- const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+ const int max_filter_level = av1_get_max_filter_level(cpi);
+ const int q = av1_ac_quant(cm->base_qindex, 0, cm->bit_depth);
// These values were determined by linear fitting the result of the
// searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20);
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
break;
default:
assert(0 &&
- "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
- "or VPX_BITS_12");
+ "bit_depth should be AOM_BITS_8, AOM_BITS_10 "
+ "or AOM_BITS_12");
return;
}
#else
int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
} else {
- lf->filter_level = vp10_search_filter_level(
+ lf->filter_level = av1_search_filter_level(
sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, NULL);
}
diff --git a/av1/encoder/picklpf.h b/av1/encoder/picklpf.h
index cd8afc6..75fdb24 100644
--- a/av1/encoder/picklpf.h
+++ b/av1/encoder/picklpf.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_PICKLPF_H_
-#define VP10_ENCODER_PICKLPF_H_
+#ifndef AV1_ENCODER_PICKLPF_H_
+#define AV1_ENCODER_PICKLPF_H_
#ifdef __cplusplus
extern "C" {
@@ -18,14 +18,14 @@
#include "av1/encoder/encoder.h"
struct yv12_buffer_config;
-struct VP10_COMP;
-int vp10_get_max_filter_level(const VP10_COMP *cpi);
-int vp10_search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
- int partial_frame, double *err);
-void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
- struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+struct AV1_COMP;
+int av1_get_max_filter_level(const AV1_COMP *cpi);
+int av1_search_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+ int partial_frame, double *err);
+void av1_pick_filter_level(const struct yv12_buffer_config *sd,
+ struct AV1_COMP *cpi, LPF_PICK_METHOD method);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_PICKLPF_H_
+#endif // AV1_ENCODER_PICKLPF_H_
diff --git a/av1/encoder/pickrst.c b/av1/encoder/pickrst.c
index b6ee6f0..22bd019 100644
--- a/av1/encoder/pickrst.c
+++ b/av1/encoder/pickrst.c
@@ -13,11 +13,11 @@
#include <limits.h>
#include <math.h>
-#include "./vpx_scale_rtcd.h"
+#include "./aom_scale_rtcd.h"
#include "aom_dsp/psnr.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/onyxc_int.h"
@@ -29,59 +29,59 @@
#include "av1/encoder/quantize.h"
static int64_t try_restoration_frame(const YV12_BUFFER_CONFIG *sd,
- VP10_COMP *const cpi, RestorationInfo *rsi,
+ AV1_COMP *const cpi, RestorationInfo *rsi,
int partial_frame) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int64_t filt_err;
- vp10_loop_restoration_frame(cm->frame_to_show, cm, rsi, 1, partial_frame);
-#if CONFIG_VP9_HIGHBITDEPTH
+ av1_loop_restoration_frame(cm->frame_to_show, cm, rsi, 1, partial_frame);
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show);
+ filt_err = aom_highbd_get_y_sse(sd, cm->frame_to_show);
} else {
- filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+ filt_err = aom_get_y_sse(sd, cm->frame_to_show);
}
#else
- filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ filt_err = aom_get_y_sse(sd, cm->frame_to_show);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Re-instate the unfiltered frame
- vpx_yv12_copy_y(&cpi->last_frame_db, cm->frame_to_show);
+ aom_yv12_copy_y(&cpi->last_frame_db, cm->frame_to_show);
return filt_err;
}
-static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
int filter_level, int partial_frame,
int *bilateral_level, double *best_cost_ret) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int i, j, tile_idx;
int64_t err;
int bits;
double cost, best_cost, cost_norestore, cost_bilateral;
- const int bilateral_level_bits = vp10_bilateral_level_bits(&cpi->common);
+ const int bilateral_level_bits = av1_bilateral_level_bits(&cpi->common);
const int bilateral_levels = 1 << bilateral_level_bits;
MACROBLOCK *x = &cpi->td.mb;
RestorationInfo rsi;
const int ntiles =
- vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+ av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
// Make a copy of the unfiltered / processed recon buffer
- vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
- vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
- 1, partial_frame);
- vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
+ aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+ av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
+ 1, partial_frame);
+ aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
// RD cost associated with no restoration
rsi.restoration_type = RESTORE_NONE;
err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
bits = 0;
- cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
- (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+ cost_norestore =
+ RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
best_cost = cost_norestore;
// RD cost associated with bilateral filtering
rsi.restoration_type = RESTORE_BILATERAL;
rsi.bilateral_level =
- (int *)vpx_malloc(sizeof(*rsi.bilateral_level) * ntiles);
+ (int *)aom_malloc(sizeof(*rsi.bilateral_level) * ntiles);
assert(rsi.bilateral_level != NULL);
for (j = 0; j < ntiles; ++j) bilateral_level[j] = -1;
@@ -98,7 +98,7 @@
// when RDCOST is used. However below we just scale both in the correct
// ratios appropriately but not exactly by these values.
cost = RDCOST_DBL(x->rdmult, x->rddiv,
- (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+ (bits << (AV1_PROB_COST_SHIFT - 4)), err);
if (cost < best_cost) {
bilateral_level[tile_idx] = i;
best_cost = cost;
@@ -116,12 +116,12 @@
}
}
err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
- cost_bilateral = RDCOST_DBL(x->rdmult, x->rddiv,
- (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+ cost_bilateral =
+ RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
- vpx_free(rsi.bilateral_level);
+ aom_free(rsi.bilateral_level);
- vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+ aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
if (cost_bilateral < cost_norestore) {
if (best_cost_ret) *best_cost_ret = cost_bilateral;
return 1;
@@ -132,13 +132,13 @@
}
static int search_filter_bilateral_level(const YV12_BUFFER_CONFIG *sd,
- VP10_COMP *cpi, int partial_frame,
+ AV1_COMP *cpi, int partial_frame,
int *filter_best, int *bilateral_level,
double *best_cost_ret) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const struct loopfilter *const lf = &cm->lf;
const int min_filter_level = 0;
- const int max_filter_level = vp10_get_max_filter_level(cpi);
+ const int max_filter_level = av1_get_max_filter_level(cpi);
int filt_direction = 0;
int filt_best;
double best_err;
@@ -147,7 +147,7 @@
int bilateral_success[MAX_LOOP_FILTER + 1];
const int ntiles =
- vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+ av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
// Start the search at the previous frame filter level unless it is now out of
// range.
@@ -157,7 +157,7 @@
// Set each entry to -1
for (i = 0; i <= MAX_LOOP_FILTER; ++i) ss_err[i] = -1.0;
- tmp_level = (int *)vpx_malloc(sizeof(*tmp_level) * ntiles);
+ tmp_level = (int *)aom_malloc(sizeof(*tmp_level) * ntiles);
bilateral_success[filt_mid] = search_bilateral_level(
sd, cpi, filt_mid, partial_frame, tmp_level, &best_err);
@@ -168,8 +168,8 @@
}
while (filter_step > 0) {
- const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level);
- const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level);
+ const int filt_high = AOMMIN(filt_mid + filter_step, max_filter_level);
+ const int filt_low = AOMMAX(filt_mid - filter_step, min_filter_level);
// Bias against raising loop filter in favor of lowering it.
double bias = (best_err / (1 << (15 - (filt_mid / 8)))) * filter_step;
@@ -227,7 +227,7 @@
}
}
- vpx_free(tmp_level);
+ aom_free(tmp_level);
// Update best error
best_err = ss_err[filt_best];
@@ -282,7 +282,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static double find_average_highbd(uint16_t *src, int h_start, int h_end,
int v_start, int v_end, int stride) {
uint64_t sum = 0;
@@ -329,7 +329,7 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Solves Ax = b, where x and b are column vectors
static int linsolve(int n, double *A, int stride, double *b, double *x) {
@@ -544,12 +544,12 @@
fi[2] = CLIP(fi[2], WIENER_FILT_TAP2_MINV, WIENER_FILT_TAP2_MAXV);
}
-static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, VP10_COMP *cpi,
+static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi,
int filter_level, int partial_frame,
int (*vfilter)[RESTORATION_HALFWIN],
int (*hfilter)[RESTORATION_HALFWIN],
int *process_tile, double *best_cost_ret) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RestorationInfo rsi;
int64_t err;
int bits;
@@ -569,36 +569,36 @@
int i, j;
const int tilesize = WIENER_TILESIZE;
- const int ntiles = vp10_get_restoration_ntiles(tilesize, width, height);
+ const int ntiles = av1_get_restoration_ntiles(tilesize, width, height);
assert(width == dgd->y_crop_width);
assert(height == dgd->y_crop_height);
assert(width == src->y_crop_width);
assert(height == src->y_crop_height);
- vp10_get_restoration_tile_size(tilesize, width, height, &tile_width,
- &tile_height, &nhtiles, &nvtiles);
+ av1_get_restoration_tile_size(tilesize, width, height, &tile_width,
+ &tile_height, &nhtiles, &nvtiles);
// Make a copy of the unfiltered / processed recon buffer
- vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
- vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
- 1, partial_frame);
- vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
+ aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+ av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
+ 1, partial_frame);
+ aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
rsi.restoration_type = RESTORE_NONE;
err = try_restoration_frame(src, cpi, &rsi, partial_frame);
bits = 0;
- cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
- (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+ cost_norestore =
+ RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
rsi.restoration_type = RESTORE_WIENER;
rsi.vfilter =
- (int(*)[RESTORATION_HALFWIN])vpx_malloc(sizeof(*rsi.vfilter) * ntiles);
+ (int(*)[RESTORATION_HALFWIN])aom_malloc(sizeof(*rsi.vfilter) * ntiles);
assert(rsi.vfilter != NULL);
rsi.hfilter =
- (int(*)[RESTORATION_HALFWIN])vpx_malloc(sizeof(*rsi.hfilter) * ntiles);
+ (int(*)[RESTORATION_HALFWIN])aom_malloc(sizeof(*rsi.hfilter) * ntiles);
assert(rsi.hfilter != NULL);
- rsi.wiener_level = (int *)vpx_malloc(sizeof(*rsi.wiener_level) * ntiles);
+ rsi.wiener_level = (int *)aom_malloc(sizeof(*rsi.wiener_level) * ntiles);
assert(rsi.wiener_level != NULL);
// Compute best Wiener filters for each tile
@@ -614,12 +614,12 @@
v_end = (vtile_idx < nvtiles - 1) ? ((vtile_idx + 1) * tile_height)
: (height - RESTORATION_HALFWIN);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth)
compute_stats_highbd(dgd->y_buffer, src->y_buffer, h_start, h_end,
v_start, v_end, dgd_stride, src_stride, M, H);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
compute_stats(dgd->y_buffer, src->y_buffer, h_start, h_end, v_start,
v_end, dgd_stride, src_stride, M, H);
@@ -650,7 +650,7 @@
err = try_restoration_frame(src, cpi, &rsi, partial_frame);
bits = 1 + WIENER_FILT_BITS;
cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
- (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+ (bits << (AV1_PROB_COST_SHIFT - 4)), err);
if (cost_wiener >= cost_norestore) process_tile[tile_idx] = 0;
}
// Cost for Wiener filtering
@@ -660,8 +660,8 @@
rsi.wiener_level[tile_idx] = process_tile[tile_idx];
}
err = try_restoration_frame(src, cpi, &rsi, partial_frame);
- cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
- (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+ cost_wiener =
+ RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
for (tile_idx = 0; tile_idx < ntiles; ++tile_idx) {
if (process_tile[tile_idx] == 0) continue;
@@ -671,11 +671,11 @@
}
}
- vpx_free(rsi.vfilter);
- vpx_free(rsi.hfilter);
- vpx_free(rsi.wiener_level);
+ aom_free(rsi.vfilter);
+ aom_free(rsi.hfilter);
+ aom_free(rsi.wiener_level);
- vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+ aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
if (cost_wiener < cost_norestore) {
if (best_cost_ret) *best_cost_ret = cost_wiener;
return 1;
@@ -685,9 +685,9 @@
}
}
-void vp10_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
- LPF_PICK_METHOD method) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+ LPF_PICK_METHOD method) {
+ AV1_COMMON *const cm = &cpi->common;
struct loopfilter *const lf = &cm->lf;
int wiener_success = 0;
int bilateral_success = 0;
@@ -697,20 +697,20 @@
int ntiles;
ntiles =
- vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+ av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
cm->rst_info.bilateral_level =
- (int *)vpx_realloc(cm->rst_info.bilateral_level,
+ (int *)aom_realloc(cm->rst_info.bilateral_level,
sizeof(*cm->rst_info.bilateral_level) * ntiles);
assert(cm->rst_info.bilateral_level != NULL);
- ntiles = vp10_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
- cm->rst_info.wiener_level = (int *)vpx_realloc(
+ ntiles = av1_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
+ cm->rst_info.wiener_level = (int *)aom_realloc(
cm->rst_info.wiener_level, sizeof(*cm->rst_info.wiener_level) * ntiles);
assert(cm->rst_info.wiener_level != NULL);
- cm->rst_info.vfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+ cm->rst_info.vfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
cm->rst_info.vfilter, sizeof(*cm->rst_info.vfilter) * ntiles);
assert(cm->rst_info.vfilter != NULL);
- cm->rst_info.hfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+ cm->rst_info.hfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
cm->rst_info.hfilter, sizeof(*cm->rst_info.hfilter) * ntiles);
assert(cm->rst_info.hfilter != NULL);
@@ -721,31 +721,31 @@
cm->rst_info.restoration_type = RESTORE_NONE;
} else if (method >= LPF_PICK_FROM_Q) {
const int min_filter_level = 0;
- const int max_filter_level = vp10_get_max_filter_level(cpi);
- const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+ const int max_filter_level = av1_get_max_filter_level(cpi);
+ const int q = av1_ac_quant(cm->base_qindex, 0, cm->bit_depth);
// These values were determined by linear fitting the result of the
// searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20);
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
break;
default:
assert(0 &&
- "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
- "or VPX_BITS_12");
+ "bit_depth should be AOM_BITS_8, AOM_BITS_10 "
+ "or AOM_BITS_12");
return;
}
#else
int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
bilateral_success = search_bilateral_level(
@@ -771,7 +771,7 @@
bilateral_success = search_filter_bilateral_level(
sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, &blf_filter_level,
cm->rst_info.bilateral_level, &cost_bilateral);
- lf->filter_level = vp10_search_filter_level(
+ lf->filter_level = av1_search_filter_level(
sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, &cost_norestore);
wiener_success = search_wiener_filter(
sd, cpi, lf->filter_level, method == LPF_PICK_FROM_SUBIMAGE,
@@ -794,15 +794,15 @@
// wiener_success);
}
if (cm->rst_info.restoration_type != RESTORE_BILATERAL) {
- vpx_free(cm->rst_info.bilateral_level);
+ aom_free(cm->rst_info.bilateral_level);
cm->rst_info.bilateral_level = NULL;
}
if (cm->rst_info.restoration_type != RESTORE_WIENER) {
- vpx_free(cm->rst_info.vfilter);
+ aom_free(cm->rst_info.vfilter);
cm->rst_info.vfilter = NULL;
- vpx_free(cm->rst_info.hfilter);
+ aom_free(cm->rst_info.hfilter);
cm->rst_info.hfilter = NULL;
- vpx_free(cm->rst_info.wiener_level);
+ aom_free(cm->rst_info.wiener_level);
cm->rst_info.wiener_level = NULL;
}
}
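
The Wiener filter fit above reduces to normal equations: compute_stats accumulates the correlation statistics M and H over each tile, and the linsolve helper ("Solves Ax = b, where x and b are column vectors") turns them into filter taps. A textbook Gaussian-elimination sketch of such a solver, matching the declared signature but with partial pivoting added for robustness; not necessarily the encoder's exact variant. Note it destroys A and b in place:

#include <math.h>

static int linsolve_sketch(int n, double *A, int stride, double *b,
                           double *x) {
  for (int k = 0; k < n - 1; ++k) {
    /* Partial pivoting: move the largest remaining pivot into row k. */
    int piv = k;
    for (int i = k + 1; i < n; ++i)
      if (fabs(A[i * stride + k]) > fabs(A[piv * stride + k])) piv = i;
    if (A[piv * stride + k] == 0.0) return 0;  /* singular */
    if (piv != k) {
      for (int j = 0; j < n; ++j) {
        double t = A[k * stride + j];
        A[k * stride + j] = A[piv * stride + j];
        A[piv * stride + j] = t;
      }
      double t = b[k]; b[k] = b[piv]; b[piv] = t;
    }
    /* Eliminate column k below the pivot. */
    for (int i = k + 1; i < n; ++i) {
      const double c = A[i * stride + k] / A[k * stride + k];
      for (int j = k; j < n; ++j) A[i * stride + j] -= c * A[k * stride + j];
      b[i] -= c * b[k];
    }
  }
  /* Back substitution. */
  for (int i = n - 1; i >= 0; --i) {
    if (A[i * stride + i] == 0.0) return 0;
    double s = b[i];
    for (int j = i + 1; j < n; ++j) s -= A[i * stride + j] * x[j];
    x[i] = s / A[i * stride + i];
  }
  return 1;
}
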
diff --git a/av1/encoder/pickrst.h b/av1/encoder/pickrst.h
index 6d94cef..7ddda43 100644
--- a/av1/encoder/pickrst.h
+++ b/av1/encoder/pickrst.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_PICKRST_H_
-#define VP10_ENCODER_PICKRST_H_
+#ifndef AV1_ENCODER_PICKRST_H_
+#define AV1_ENCODER_PICKRST_H_
#ifdef __cplusplus
extern "C" {
@@ -18,13 +18,13 @@
#include "av1/encoder/encoder.h"
struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
- LPF_PICK_METHOD method);
+void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+ LPF_PICK_METHOD method);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_PICKRST_H_
+#endif // AV1_ENCODER_PICKRST_H_
diff --git a/av1/encoder/quantize.c b/av1/encoder/quantize.c
index ed8a04b..902a449 100644
--- a/av1/encoder/quantize.c
+++ b/av1/encoder/quantize.c
@@ -9,9 +9,9 @@
*/
#include <math.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/quant_common.h"
@@ -44,7 +44,7 @@
q = NUQ_KNOTS + (((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16);
}
if (q) {
- *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+ *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
} else {
@@ -77,8 +77,8 @@
}
if (q) {
*dqcoeff_ptr = ROUND_POWER_OF_TWO(
- vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
- // *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
+ av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+ // *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
// (1 + logsizeby32);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
@@ -109,7 +109,7 @@
((((int64_t)tmp - cuml_bins_ptr[NUQ_KNOTS - 1]) * quant) >> 16);
}
if (q) {
- *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+ *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
} else {
@@ -143,8 +143,8 @@
}
if (q) {
*dqcoeff_ptr = ROUND_POWER_OF_TWO(
- vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
- // *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
+ av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+ // *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
// (1 + logsizeby32);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
@@ -332,34 +332,14 @@
}
#endif // CONFIG_NEW_QUANT
-void vp10_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+void av1_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
*eob_ptr = 0;
}
-void vp10_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
- const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
- const scan_order *sc, const QUANT_PARAM *qparam) {
- // obsolete skip_block
- const int skip_block = 0;
-
- if (qparam->log_scale == 0) {
- vp10_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
- pd->dequant, eob_ptr, sc->scan, sc->iscan);
- } else {
- vp10_quantize_fp_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin,
- p->round_fp, p->quant_fp, p->quant_shift, qcoeff_ptr,
- dqcoeff_ptr, pd->dequant, eob_ptr, sc->scan,
- sc->iscan);
- }
-}
-
-void vp10_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
const MACROBLOCKD_PLANE *pd,
tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
@@ -368,50 +348,55 @@
const int skip_block = 0;
if (qparam->log_scale == 0) {
- vpx_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round, p->quant,
+ av1_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+ pd->dequant, eob_ptr, sc->scan, sc->iscan);
+ } else {
+ av1_quantize_fp_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+ pd->dequant, eob_ptr, sc->scan, sc->iscan);
+ }
+}
+
+void av1_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd, tran_low_t *dqcoeff_ptr,
+ uint16_t *eob_ptr, const scan_order *sc,
+ const QUANT_PARAM *qparam) {
+ // obsolete skip_block
+ const int skip_block = 0;
+
+ if (qparam->log_scale == 0) {
+ aom_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff_ptr, dqcoeff_ptr, pd->dequant,
eob_ptr, sc->scan, sc->iscan);
} else {
- vpx_quantize_b_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
+ aom_quantize_b_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
pd->dequant, eob_ptr, sc->scan, sc->iscan);
}
}
-void vp10_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
- const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
- const scan_order *sc, const QUANT_PARAM *qparam) {
+void av1_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc, const QUANT_PARAM *qparam) {
// obsolete skip_block
const int skip_block = 0;
(void)sc;
if (qparam->log_scale == 0) {
- vpx_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
+ aom_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr, pd->dequant[0],
eob_ptr);
} else {
- vpx_quantize_dc_32x32(coeff_ptr, skip_block, p->round, p->quant_fp[0],
+ aom_quantize_dc_32x32(coeff_ptr, skip_block, p->round, p->quant_fp[0],
qcoeff_ptr, dqcoeff_ptr, pd->dequant[0], eob_ptr);
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_facade(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
- tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
- const QUANT_PARAM *qparam) {
- // obsolete skip_block
- const int skip_block = 0;
-
- vp10_highbd_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
- pd->dequant, eob_ptr, sc->scan, sc->iscan,
- qparam->log_scale);
-}
-
-void vp10_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_facade(const tran_low_t *coeff_ptr,
intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
tran_low_t *qcoeff_ptr,
const MACROBLOCKD_PLANE *pd,
@@ -421,25 +406,43 @@
// obsolete skip_block
const int skip_block = 0;
- vp10_highbd_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+ av1_highbd_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
pd->dequant, eob_ptr, sc->scan, sc->iscan,
qparam->log_scale);
}
-void vp10_highbd_quantize_dc_facade(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
- tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
- const QUANT_PARAM *qparam) {
+void av1_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+ intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+ tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc,
+ const QUANT_PARAM *qparam) {
+ // obsolete skip_block
+ const int skip_block = 0;
+
+ av1_highbd_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+ pd->dequant, eob_ptr, sc->scan, sc->iscan,
+ qparam->log_scale);
+}
+
+void av1_highbd_quantize_dc_facade(const tran_low_t *coeff_ptr,
+ intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+ tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc,
+ const QUANT_PARAM *qparam) {
// obsolete skip_block
const int skip_block = 0;
(void)sc;
- vp10_highbd_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
- p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr,
- pd->dequant[0], eob_ptr, qparam->log_scale);
+ av1_highbd_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
+ p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr,
+ pd->dequant[0], eob_ptr, qparam->log_scale);
}
#if CONFIG_NEW_QUANT
@@ -464,7 +467,7 @@
q = NUQ_KNOTS + (((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16);
}
if (q) {
- *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+ *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
} else {
@@ -493,7 +496,7 @@
q = NUQ_KNOTS + (((tmp - cuml_bins_ptr[NUQ_KNOTS - 1]) * quant) >> 16);
}
if (q) {
- *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+ *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
} else {
@@ -526,7 +529,7 @@
}
if (q) {
*dqcoeff_ptr = ROUND_POWER_OF_TWO(
- vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+ av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
} else {
@@ -559,7 +562,7 @@
}
if (q) {
*dqcoeff_ptr = ROUND_POWER_OF_TWO(
- vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+ av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
*qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
*dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
} else {
@@ -748,20 +751,20 @@
*eob_ptr = eob + 1;
}
#endif // CONFIG_NEW_QUANT
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan,
- const int16_t *iscan
+void av1_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan
#if CONFIG_AOM_QM
- ,
- const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+ ,
+ const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
#endif
- ) {
+ ) {
int i, eob = -1;
// TODO(jingning) Decide the need of these arguments after the
// quantization process is completed.
@@ -806,19 +809,19 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan,
#if CONFIG_AOM_QM
- const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
+ const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
#endif
- int log_scale) {
+ int log_scale) {
int i;
int eob = -1;
const int scale = 1 << log_scale;
@@ -865,23 +868,22 @@
*eob_ptr = eob + 1;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// TODO(jingning) Refactor this file and combine functions with similar
// operations.
-void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan
+void av1_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan
#if CONFIG_AOM_QM
- ,
- const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+ ,
+ const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
#endif
- ) {
+ ) {
int i, eob = -1;
(void)zbin_ptr;
(void)quant_shift_ptr;
@@ -931,19 +933,18 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan,
#if CONFIG_AOM_QM
- const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
+ const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
#endif
- int log_scale) {
+ int log_scale) {
int i, non_zero_count = (int)n_coeffs, eob = -1;
int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
int round[2] = { round_ptr[0], round_ptr[1] };
@@ -1021,12 +1022,12 @@
}
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
- int skip_block, const int16_t *round_ptr,
- const int16_t quant, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
- uint16_t *eob_ptr, const int log_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+ int skip_block, const int16_t *round_ptr,
+ const int16_t quant, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+ uint16_t *eob_ptr, const int log_scale) {
int eob = -1;
memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
@@ -1056,15 +1057,15 @@
*shift = 1 << (16 - l);
}
-static int get_qzbin_factor(int q, vpx_bit_depth_t bit_depth) {
- const int quant = vp10_dc_quant(q, 0, bit_depth);
-#if CONFIG_VP9_HIGHBITDEPTH
+static int get_qzbin_factor(int q, aom_bit_depth_t bit_depth) {
+ const int quant = av1_dc_quant(q, 0, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
- case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
- case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
+ case AOM_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+ case AOM_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
+ case AOM_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1;
}
#else
@@ -1073,8 +1074,8 @@
#endif
}
-void vp10_init_quantizer(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_init_quantizer(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
QUANTS *const quants = &cpi->quants;
int i, q, quant;
#if CONFIG_NEW_QUANT
@@ -1088,8 +1089,8 @@
for (i = 0; i < 2; ++i) {
int qrounding_factor_fp = 64;
// y
- quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
- : vp10_ac_quant(q, 0, cm->bit_depth);
+ quant = i == 0 ? av1_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
+ : av1_ac_quant(q, 0, cm->bit_depth);
invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant);
quants->y_quant_fp[q][i] = (1 << 16) / quant;
quants->y_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
@@ -1098,8 +1099,8 @@
cpi->y_dequant[q][i] = quant;
// uv
- quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
- : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
+ quant = i == 0 ? av1_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
+ : av1_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
quant);
quants->uv_quant_fp[q][i] = (1 << 16) / quant;
@@ -1114,11 +1115,11 @@
for (i = 0; i < COEF_BANDS; i++) {
const int quant = cpi->y_dequant[q][i != 0];
const int uvquant = cpi->uv_dequant[q][i != 0];
- vp10_get_dequant_val_nuq(quant, q, i, cpi->y_dequant_val_nuq[dq][q][i],
- quants->y_cuml_bins_nuq[dq][q][i], dq);
- vp10_get_dequant_val_nuq(uvquant, q, i,
- cpi->uv_dequant_val_nuq[dq][q][i],
- quants->uv_cuml_bins_nuq[dq][q][i], dq);
+ av1_get_dequant_val_nuq(quant, q, i, cpi->y_dequant_val_nuq[dq][q][i],
+ quants->y_cuml_bins_nuq[dq][q][i], dq);
+ av1_get_dequant_val_nuq(uvquant, q, i,
+ cpi->uv_dequant_val_nuq[dq][q][i],
+ quants->uv_cuml_bins_nuq[dq][q][i], dq);
}
}
#endif // CONFIG_NEW_QUANT
@@ -1143,13 +1144,13 @@
}
}
-void vp10_init_plane_quantizers(const VP10_COMP *cpi, MACROBLOCK *x,
- int segment_id) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_init_plane_quantizers(const AV1_COMP *cpi, MACROBLOCK *x,
+ int segment_id) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const QUANTS *const quants = &cpi->quants;
- const int qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
- const int rdmult = vp10_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
+ const int qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ const int rdmult = av1_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
int i;
#if CONFIG_AOM_QM
int minqm = cm->min_qmlevel;
@@ -1218,17 +1219,17 @@
set_error_per_bit(x, rdmult);
- vp10_initialize_me_consts(cpi, x, x->q_index);
+ av1_initialize_me_consts(cpi, x, x->q_index);
}
-void vp10_frame_init_quantizer(VP10_COMP *cpi) {
+void av1_frame_init_quantizer(AV1_COMP *cpi) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
- vp10_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
+ av1_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
}
-void vp10_set_quantizer(VP10_COMMON *cm, int q) {
- // quantizer has to be reinitialized with vp10_init_quantizer() if any
+void av1_set_quantizer(AV1_COMMON *cm, int q) {
+ // quantizer has to be reinitialized with av1_init_quantizer() if any
// delta_q changes.
cm->base_qindex = q;
cm->y_dc_delta_q = 0;
@@ -1246,11 +1247,11 @@
208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
};
-int vp10_quantizer_to_qindex(int quantizer) {
+int av1_quantizer_to_qindex(int quantizer) {
return quantizer_to_qindex[quantizer];
}
-int vp10_qindex_to_quantizer(int qindex) {
+int av1_qindex_to_quantizer(int qindex) {
int quantizer;
for (quantizer = 0; quantizer < 64; ++quantizer)
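
One idiom worth noting in the quantize paths above: the magnitude-to-signed-coefficient step *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign is branchless conditional negation. coeff_sign is the sign mask of the input coefficient (0 for non-negative, -1 for negative, typically derived as coeff >> 31); XORing with -1 and subtracting -1 is exactly two's-complement negation, while XORing with 0 and subtracting 0 is a no-op. A self-contained check:

#include <assert.h>
#include <stdint.h>

static int32_t apply_sign(int32_t magnitude, int32_t coeff) {
  /* Arithmetic shift is implementation-defined in C but universally
   * arithmetic on the targets this code supports. */
  const int32_t coeff_sign = coeff >> 31;  /* 0 or -1 */
  return (magnitude ^ coeff_sign) - coeff_sign;
}

int main(void) {
  assert(apply_sign(7, 100) == 7);    /* positive coeff: unchanged */
  assert(apply_sign(7, -100) == -7);  /* negative coeff: negated */
  return 0;
}
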
diff --git a/av1/encoder/quantize.h b/av1/encoder/quantize.h
index 6b1e739..faf26b0 100644
--- a/av1/encoder/quantize.h
+++ b/av1/encoder/quantize.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_QUANTIZE_H_
-#define VP10_ENCODER_QUANTIZE_H_
+#ifndef AV1_ENCODER_QUANTIZE_H_
+#define AV1_ENCODER_QUANTIZE_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "av1/common/quant_common.h"
#include "av1/common/scan.h"
#include "av1/encoder/block.h"
@@ -22,13 +22,13 @@
typedef struct QUANT_PARAM { int log_scale; } QUANT_PARAM;
-typedef void (*VP10_QUANT_FACADE)(const tran_low_t *coeff_ptr,
- intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
- tran_low_t *qcoeff_ptr,
- const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
- const scan_order *sc,
- const QUANT_PARAM *qparam);
+typedef void (*AV1_QUANT_FACADE)(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const MACROBLOCK_PLANE *p,
+ tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc,
+ const QUANT_PARAM *qparam);
typedef struct {
#if CONFIG_NEW_QUANT
@@ -58,42 +58,42 @@
DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
} QUANTS;
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
-void vp10_frame_init_quantizer(struct VP10_COMP *cpi);
+void av1_frame_init_quantizer(struct AV1_COMP *cpi);
-void vp10_init_plane_quantizers(const struct VP10_COMP *cpi, MACROBLOCK *x,
- int segment_id);
+void av1_init_plane_quantizers(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ int segment_id);
-void vp10_init_quantizer(struct VP10_COMP *cpi);
+void av1_init_quantizer(struct AV1_COMP *cpi);
-void vp10_set_quantizer(struct VP10Common *cm, int q);
+void av1_set_quantizer(struct AV1Common *cm, int q);
-int vp10_quantizer_to_qindex(int quantizer);
+int av1_quantizer_to_qindex(int quantizer);
-int vp10_qindex_to_quantizer(int qindex);
+int av1_qindex_to_quantizer(int qindex);
-void vp10_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+void av1_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
-void vp10_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
- const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
- const scan_order *sc, const QUANT_PARAM *qparam);
-
-void vp10_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
const MACROBLOCKD_PLANE *pd,
tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
const scan_order *sc, const QUANT_PARAM *qparam);
-void vp10_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
- const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
- const scan_order *sc, const QUANT_PARAM *qparam);
+void av1_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd, tran_low_t *dqcoeff_ptr,
+ uint16_t *eob_ptr, const scan_order *sc,
+ const QUANT_PARAM *qparam);
+
+void av1_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc, const QUANT_PARAM *qparam);
#if CONFIG_NEW_QUANT
void quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
@@ -123,14 +123,8 @@
uint16_t *eob_ptr);
#endif // CONFIG_NEW_QUANT
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_facade(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
- tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
- const QUANT_PARAM *qparam);
-
-void vp10_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_facade(const tran_low_t *coeff_ptr,
intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
tran_low_t *qcoeff_ptr,
const MACROBLOCKD_PLANE *pd,
@@ -138,17 +132,27 @@
const scan_order *sc,
const QUANT_PARAM *qparam);
-void vp10_highbd_quantize_dc_facade(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
- tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
- tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
- const QUANT_PARAM *qparam);
+void av1_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+ intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+ tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc,
+ const QUANT_PARAM *qparam);
-void vp10_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
- int skip_block, const int16_t *round_ptr,
- const int16_t quant, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
- uint16_t *eob_ptr, const int log_scale);
+void av1_highbd_quantize_dc_facade(const tran_low_t *coeff_ptr,
+ intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+ tran_low_t *qcoeff_ptr,
+ const MACROBLOCKD_PLANE *pd,
+ tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+ const scan_order *sc,
+ const QUANT_PARAM *qparam);
+
+void av1_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+ int skip_block, const int16_t *round_ptr,
+ const int16_t quant, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+ uint16_t *eob_ptr, const int log_scale);
#if CONFIG_NEW_QUANT
void highbd_quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t quant,
@@ -176,10 +180,10 @@
tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
#endif // CONFIG_NEW_QUANT
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_QUANTIZE_H_
+#endif // AV1_ENCODER_QUANTIZE_H_
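
The AV1_QUANT_FACADE typedef above exists so every quantize entry point shares one signature: callers can hold a single function pointer per block type and dispatch without per-call branching, while each facade handles the qparam->log_scale split (standard vs. 32x32 path) internally, as quantize.c shows. A simplified sketch of the pattern, with all types and helpers as stand-ins:

typedef struct { int log_scale; } quant_param_t;
typedef void (*quant_facade_t)(const short *coeff, int n,
                               const quant_param_t *qparam);

static void quantize_std(const short *coeff, int n) { (void)coeff; (void)n; }
static void quantize_32x32(const short *coeff, int n) { (void)coeff; (void)n; }

/* One shared signature; the log_scale split stays inside the facade
 * (cf. av1_quantize_b_facade). */
static void quantize_b_facade_sketch(const short *coeff, int n,
                                     const quant_param_t *qparam) {
  if (qparam->log_scale == 0)
    quantize_std(coeff, n);    /* standard transform sizes */
  else
    quantize_32x32(coeff, n);  /* scaled path for 32x32 transforms */
}

/* A caller selects the facade once and reuses the pointer. */
static void encode_block(const short *coeff, int n,
                         const quant_param_t *qparam) {
  const quant_facade_t quantize = quantize_b_facade_sketch;
  quantize(coeff, n, qparam);
}
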
diff --git a/av1/encoder/ransac.c b/av1/encoder/ransac.c
index e925068..0beaab8 100644
--- a/av1/encoder/ransac.c
+++ b/av1/encoder/ransac.c
@@ -15,7 +15,7 @@
#include <stdlib.h>
#include <assert.h>
-#include "vp10/encoder/ransac.h"
+#include "av1/encoder/ransac.h"
#define MAX_PARAMDIM 9
#define MAX_MINPTS 4
@@ -468,7 +468,7 @@
corners1_int[2 * i + 1] = (int)corners1[i * 2 + 1];
}
- vp10_integerize_model(H, type, &wm);
+ av1_integerize_model(H, type, &wm);
projectpoints(wm.wmmat, corners1_int, image1_coord, npoints, 2, 2, 0, 0);
for (i = 0; i < npoints; ++i) {
diff --git a/av1/encoder/ransac.h b/av1/encoder/ransac.h
index 0b14ecf..c8fbdc8 100644
--- a/av1/encoder/ransac.h
+++ b/av1/encoder/ransac.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_RANSAC_H_
-#define VP10_ENCODER_RANSAC_H_
+#ifndef AV1_ENCODER_RANSAC_H_
+#define AV1_ENCODER_RANSAC_H_
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <memory.h>
-#include "vp10/common/warped_motion.h"
+#include "av1/common/warped_motion.h"
typedef int (*RansacType)(double *matched_points, int npoints,
int *number_of_inliers, int *best_inlier_mask,
@@ -34,4 +34,4 @@
int ransacTranslation(double *matched_points, int npoints,
int *number_of_inliers, int *best_inlier_indices,
double *bestH);
-#endif // VP10_ENCODER_RANSAC_H
+#endif  // AV1_ENCODER_RANSAC_H_
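
ransacTranslation, declared above, follows the common RansacType shape. A hedged call sketch — the packed (x1, y1, x2, y2) point layout, the 9-entry model buffer (MAX_PARAMDIM in ransac.c), and the return-value convention are assumptions for illustration, not documented by this header:

#include <stdio.h>

int ransacTranslation(double *matched_points, int npoints,
                      int *number_of_inliers, int *best_inlier_indices,
                      double *bestH);

static void fit_translation_sketch(void) {
  /* Assumed layout: x1, y1, x2, y2 per correspondence. */
  double pts[] = { 0, 0, 2, 1, 5, 3, 7, 4, 9, 9, 11, 10 };
  int num_inliers = 0;
  int inlier_idx[3];
  double model[9];  /* MAX_PARAMDIM-sized output buffer (assumption) */
  const int ret = ransacTranslation(pts, 3, &num_inliers, inlier_idx, model);
  (void)ret;  /* return-value semantics not shown in this header */
  printf("inliers: %d\n", num_inliers);
}
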
diff --git a/av1/encoder/ratectrl.c b/av1/encoder/ratectrl.c
index ddd5762..e94571c 100644
--- a/av1/encoder/ratectrl.c
+++ b/av1/encoder/ratectrl.c
@@ -15,8 +15,8 @@
#include <stdlib.h>
#include <string.h>
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
@@ -42,17 +42,17 @@
#define MAX_BPB_FACTOR 50
#define FRAME_OVERHEAD_BITS 200
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define ASSIGN_MINQ_TABLE(bit_depth, name) \
do { \
switch (bit_depth) { \
- case VPX_BITS_8: name = name##_8; break; \
- case VPX_BITS_10: name = name##_10; break; \
- case VPX_BITS_12: name = name##_12; break; \
+ case AOM_BITS_8: name = name##_8; break; \
+ case AOM_BITS_10: name = name##_10; break; \
+ case AOM_BITS_12: name = name##_12; break; \
default: \
assert(0 && \
- "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
- " or VPX_BITS_12"); \
+ "bit_depth should be AOM_BITS_8, AOM_BITS_10" \
+ " or AOM_BITS_12"); \
name = NULL; \
} \
} while (0)
@@ -72,7 +72,7 @@
static int inter_minq_8[QINDEX_RANGE];
static int rtc_minq_8[QINDEX_RANGE];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int kf_low_motion_minq_10[QINDEX_RANGE];
static int kf_high_motion_minq_10[QINDEX_RANGE];
static int arfgf_low_motion_minq_10[QINDEX_RANGE];
@@ -97,16 +97,16 @@
// The formulae were derived from computing a 3rd order polynomial best
// fit to the original data (after plotting real maxq vs minq (not q index))
static int get_minq_index(double maxq, double x3, double x2, double x1,
- vpx_bit_depth_t bit_depth) {
+ aom_bit_depth_t bit_depth) {
int i;
- const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq);
+ const double minqtarget = AOMMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq);
// Special case handling to deal with the step from q2.0
// down to lossless mode represented by q 1.0.
if (minqtarget <= 2.0) return 0;
for (i = 0; i < QINDEX_RANGE; i++) {
- if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
+ if (minqtarget <= av1_convert_qindex_to_q(i, bit_depth)) return i;
}
return QINDEX_RANGE - 1;
@@ -114,10 +114,10 @@
static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
int *arfgf_high, int *inter, int *rtc,
- vpx_bit_depth_t bit_depth) {
+ aom_bit_depth_t bit_depth) {
int i;
for (i = 0; i < QINDEX_RANGE; i++) {
- const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
+ const double maxq = av1_convert_qindex_to_q(i, bit_depth);
kf_low_m[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.150, bit_depth);
kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth);
arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth);
@@ -127,42 +127,42 @@
}
}
-void vp10_rc_init_minq_luts(void) {
+void av1_rc_init_minq_luts(void) {
init_minq_luts(kf_low_motion_minq_8, kf_high_motion_minq_8,
arfgf_low_motion_minq_8, arfgf_high_motion_minq_8,
- inter_minq_8, rtc_minq_8, VPX_BITS_8);
-#if CONFIG_VP9_HIGHBITDEPTH
+ inter_minq_8, rtc_minq_8, AOM_BITS_8);
+#if CONFIG_AOM_HIGHBITDEPTH
init_minq_luts(kf_low_motion_minq_10, kf_high_motion_minq_10,
arfgf_low_motion_minq_10, arfgf_high_motion_minq_10,
- inter_minq_10, rtc_minq_10, VPX_BITS_10);
+ inter_minq_10, rtc_minq_10, AOM_BITS_10);
init_minq_luts(kf_low_motion_minq_12, kf_high_motion_minq_12,
arfgf_low_motion_minq_12, arfgf_high_motion_minq_12,
- inter_minq_12, rtc_minq_12, VPX_BITS_12);
+ inter_minq_12, rtc_minq_12, AOM_BITS_12);
#endif
}
// These functions use formulaic calculations to make playing with the
// quantizer tables easier. If necessary they can be replaced by lookup
// tables if and when things settle down in the experimental bitstream.
-double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth) {
// Convert the index to a real Q value (scaled down to match old Q values)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
- case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
- case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+ case AOM_BITS_8: return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
+ case AOM_BITS_10: return av1_ac_quant(qindex, 0, bit_depth) / 16.0;
+ case AOM_BITS_12: return av1_ac_quant(qindex, 0, bit_depth) / 64.0;
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1.0;
}
#else
- return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+ return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
#endif
}
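The per-depth divisors (4, 16, 64) normalize the larger av1_ac_quant() ranges
at 10 and 12 bits back onto the legacy 8-bit Q scale; a quick sketch (exact
values depend on the quant tables):

    double q8  = av1_convert_qindex_to_q(60, AOM_BITS_8);
    double q10 = av1_convert_qindex_to_q(60, AOM_BITS_10);
    /* q8 and q10 should land close together, not bitwise equal. */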
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
- double correction_factor, vpx_bit_depth_t bit_depth) {
- const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+ double correction_factor, aom_bit_depth_t bit_depth) {
+ const double q = av1_convert_qindex_to_q(qindex, bit_depth);
int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
assert(correction_factor <= MAX_BPB_FACTOR &&
@@ -173,20 +173,20 @@
return (int)(enumerator * correction_factor / q);
}
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
- double correction_factor,
- vpx_bit_depth_t bit_depth) {
+int av1_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
+ double correction_factor,
+ aom_bit_depth_t bit_depth) {
const int bpm =
- (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
- return VPXMAX(FRAME_OVERHEAD_BITS,
+ (int)(av1_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
+ return AOMMAX(FRAME_OVERHEAD_BITS,
(int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
}
-int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_pframe_target_size(const AV1_COMP *const cpi, int target) {
const RATE_CONTROL *rc = &cpi->rc;
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
const int min_frame_target =
- VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
+ AOMMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
// Clip the frame target to the minimum setup value.
#if CONFIG_EXT_REFS
if (cpi->rc.is_src_frame_alt_ref) {
@@ -207,27 +207,27 @@
if (oxcf->rc_max_inter_bitrate_pct) {
const int max_rate =
rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
- target = VPXMIN(target, max_rate);
+ target = AOMMIN(target, max_rate);
}
return target;
}
-int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_iframe_target_size(const AV1_COMP *const cpi, int target) {
const RATE_CONTROL *rc = &cpi->rc;
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
if (oxcf->rc_max_intra_bitrate_pct) {
const int max_rate =
rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
- target = VPXMIN(target, max_rate);
+ target = AOMMIN(target, max_rate);
}
if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
return target;
}
// Update the buffer level: leaky bucket model.
-static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
- const VP10_COMMON *const cm = &cpi->common;
+static void update_buffer_level(AV1_COMP *cpi, int encoded_frame_size) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
// Non-viewable frames are a special case and are treated as pure overhead.
@@ -243,12 +243,12 @@
rc->bits_off_target += rc->avg_frame_bandwidth - encoded_frame_size;
// Clip the buffer level to the maximum specified buffer size.
- rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
+ rc->bits_off_target = AOMMIN(rc->bits_off_target, rc->maximum_buffer_size);
rc->buffer_level = rc->bits_off_target;
}
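A standalone sketch of the leaky-bucket arithmetic above (illustrative
numbers; the RATE_CONTROL fields are reduced to locals and the
non-viewable-frame branch is omitted):

    int64_t bits_off_target = 0;
    const int avg_frame_bandwidth = 40000;       /* per-frame budget, in bits */
    const int64_t maximum_buffer_size = 240000;  /* illustrative cap */
    int frame;
    for (frame = 0; frame < 3; ++frame) {
      const int encoded_frame_size = 50000;  /* each frame overshoots by 10000 */
      bits_off_target += avg_frame_bandwidth - encoded_frame_size;
      if (bits_off_target > maximum_buffer_size)
        bits_off_target = maximum_buffer_size;
    }
    /* bits_off_target == -30000: the encoder is running above target rate. */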
-int vp10_rc_get_default_min_gf_interval(int width, int height,
- double framerate) {
+int av1_rc_get_default_min_gf_interval(int width, int height,
+ double framerate) {
// Assume we do not need any constraint lower than 4K 20 fps
static const double factor_safe = 3840 * 2160 * 20.0;
const double factor = width * height * framerate;
@@ -258,7 +258,7 @@
if (factor <= factor_safe)
return default_interval;
else
- return VPXMAX(default_interval,
+ return AOMMAX(default_interval,
(int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5));
// Note this logic makes:
// 4K24: 5
@@ -266,16 +266,16 @@
// 4K60: 12
}
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
- int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
+int av1_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
+ int interval = AOMMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
interval += (interval & 0x01); // Round to even value
- return VPXMAX(interval, min_gf_interval);
+ return AOMMAX(interval, min_gf_interval);
}
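For example, at 30 fps: (int)(30 * 0.75) = 22, which is already even, so
assuming MAX_GF_INTERVAL is at least 22 the result is
AOMMAX(22, min_gf_interval):

    int max_gf = av1_rc_get_default_max_gf_interval(30.0, 4);  /* -> 22 here */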
-void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
+void av1_rc_init(const AV1EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
int i;
- if (pass == 0 && oxcf->rc_mode == VPX_CBR) {
+ if (pass == 0 && oxcf->rc_mode == AOM_CBR) {
rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q;
rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
} else {
@@ -312,7 +312,7 @@
rc->ni_frames = 0;
rc->tot_q = 0.0;
- rc->avg_q = vp10_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
+ rc->avg_q = av1_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
for (i = 0; i < RATE_FACTOR_LEVELS; ++i) {
rc->rate_correction_factors[i] = 1.0;
@@ -321,16 +321,16 @@
rc->min_gf_interval = oxcf->min_gf_interval;
rc->max_gf_interval = oxcf->max_gf_interval;
if (rc->min_gf_interval == 0)
- rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+ rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
oxcf->width, oxcf->height, oxcf->init_framerate);
if (rc->max_gf_interval == 0)
- rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+ rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
oxcf->init_framerate, rc->min_gf_interval);
rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
}
-int vp10_rc_drop_frame(VP10_COMP *cpi) {
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+int av1_rc_drop_frame(AV1_COMP *cpi) {
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
if (!oxcf->drop_frames_water_mark) {
@@ -365,7 +365,7 @@
}
}
-static double get_rate_correction_factor(const VP10_COMP *cpi) {
+static double get_rate_correction_factor(const AV1_COMP *cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
double rcf;
@@ -378,7 +378,7 @@
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
!rc->is_src_frame_alt_ref &&
- (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
+ (cpi->oxcf.rc_mode != AOM_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
rcf = rc->rate_correction_factors[GF_ARF_STD];
else
rcf = rc->rate_correction_factors[INTER_NORMAL];
@@ -387,7 +387,7 @@
return fclamp(rcf, MIN_BPB_FACTOR, MAX_BPB_FACTOR);
}
-static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
+static void set_rate_correction_factor(AV1_COMP *cpi, double factor) {
RATE_CONTROL *const rc = &cpi->rc;
// Normalize RCF to account for the size-dependent scaling factor.
@@ -404,15 +404,15 @@
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
!rc->is_src_frame_alt_ref &&
- (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
+ (cpi->oxcf.rc_mode != AOM_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
rc->rate_correction_factors[GF_ARF_STD] = factor;
else
rc->rate_correction_factors[INTER_NORMAL] = factor;
}
}
-void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_update_rate_correction_factors(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
int correction_factor = 100;
double rate_correction_factor = get_rate_correction_factor(cpi);
double adjustment_limit;
@@ -423,18 +423,18 @@
if (cpi->rc.is_src_frame_alt_ref) return;
// Clear down mmx registers to allow floating point in what follows
- vpx_clear_system_state();
+ aom_clear_system_state();
// Work out how big we would have expected the frame to be at this Q given
// the current correction factor.
// Stay in double to avoid int overflow when values are large
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->common.seg.enabled) {
projected_size_based_on_q =
- vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
+ av1_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
} else {
projected_size_based_on_q =
- vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
- cm->MBs, rate_correction_factor, cm->bit_depth);
+ av1_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex, cm->MBs,
+ rate_correction_factor, cm->bit_depth);
}
// Work out a size correction factor.
if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
@@ -444,7 +444,7 @@
// A more heavily damped adjustment is used if we have been oscillating on
// either side of the target.
adjustment_limit =
- 0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
+ 0.25 + 0.5 * AOMMIN(1, fabs(log10(0.01 * correction_factor)));
cpi->rc.q_2_frame = cpi->rc.q_1_frame;
cpi->rc.q_1_frame = cm->base_qindex;
@@ -478,9 +478,9 @@
set_rate_correction_factor(cpi, rate_correction_factor);
}
-int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
- int active_best_quality, int active_worst_quality) {
- const VP10_COMMON *const cm = &cpi->common;
+int av1_rc_regulate_q(const AV1_COMP *cpi, int target_bits_per_frame,
+ int active_best_quality, int active_worst_quality) {
+ const AV1_COMMON *const cm = &cpi->common;
int q = active_worst_quality;
int last_error = INT_MAX;
int i, target_bits_per_mb, bits_per_mb_at_this_q;
@@ -496,9 +496,9 @@
do {
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
bits_per_mb_at_this_q =
- (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
+ (int)av1_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
} else {
- bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+ bits_per_mb_at_this_q = (int)av1_rc_bits_per_mb(
cm->frame_type, i, correction_factor, cm->bit_depth);
}
@@ -516,11 +516,11 @@
// In CBR mode, this makes sure q is between oscillating Qs to prevent
// resonance.
- if (cpi->oxcf.rc_mode == VPX_CBR &&
+ if (cpi->oxcf.rc_mode == AOM_CBR &&
(cpi->rc.rc_1_frame * cpi->rc.rc_2_frame == -1) &&
cpi->rc.q_1_frame != cpi->rc.q_2_frame) {
- q = clamp(q, VPXMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame),
- VPXMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame));
+ q = clamp(q, AOMMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame),
+ AOMMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame));
}
return q;
}
@@ -541,7 +541,7 @@
}
static int get_kf_active_quality(const RATE_CONTROL *const rc, int q,
- vpx_bit_depth_t bit_depth) {
+ aom_bit_depth_t bit_depth) {
int *kf_low_motion_minq;
int *kf_high_motion_minq;
ASSIGN_MINQ_TABLE(bit_depth, kf_low_motion_minq);
@@ -551,7 +551,7 @@
}
static int get_gf_active_quality(const RATE_CONTROL *const rc, int q,
- vpx_bit_depth_t bit_depth) {
+ aom_bit_depth_t bit_depth) {
int *arfgf_low_motion_minq;
int *arfgf_high_motion_minq;
ASSIGN_MINQ_TABLE(bit_depth, arfgf_low_motion_minq);
@@ -560,7 +560,7 @@
arfgf_low_motion_minq, arfgf_high_motion_minq);
}
-static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_vbr(const AV1_COMP *cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
const unsigned int curr_frame = cpi->common.current_video_frame;
int active_worst_quality;
@@ -578,17 +578,17 @@
: rc->last_q[INTER_FRAME] * 2;
}
}
- return VPXMIN(active_worst_quality, rc->worst_quality);
+ return AOMMIN(active_worst_quality, rc->worst_quality);
}
// Adjust active_worst_quality level based on buffer level.
-static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_cbr(const AV1_COMP *cpi) {
// Adjust active_worst_quality: If buffer is above the optimal/target level,
// bring active_worst_quality down depending on fullness of buffer.
// If buffer is below the optimal level, let the active_worst_quality go from
// ambient Q (at buffer = optimal level) to worst_quality level
// (at buffer = critical level).
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *rc = &cpi->rc;
// Buffer level below which we push active_worst to worst_quality.
int64_t critical_level = rc->optimal_buffer_level >> 3;
@@ -603,10 +603,10 @@
// So for the first few frames following a key frame, the qp of that key
// frame is weighted into the active_worst_quality setting.
ambient_qp = (cm->current_video_frame < 5)
- ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
+ ? AOMMIN(rc->avg_frame_qindex[INTER_FRAME],
rc->avg_frame_qindex[KEY_FRAME])
: rc->avg_frame_qindex[INTER_FRAME];
- active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4);
+ active_worst_quality = AOMMIN(rc->worst_quality, ambient_qp * 5 / 4);
if (rc->buffer_level > rc->optimal_buffer_level) {
// Adjust down.
// Maximum limit for down adjustment, ~30%.
@@ -637,10 +637,10 @@
return active_worst_quality;
}
-static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_cbr(const AV1_COMP *cpi,
int *bottom_index,
int *top_index) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
int active_best_quality;
int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
@@ -655,10 +655,10 @@
// based on the ambient Q to reduce the risk of popping.
if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
- double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(
+ double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ int delta_qindex = av1_compute_qdelta(
rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
} else if (cm->current_video_frame > 0) {
// not first frame of one pass and kf_boost is set
double q_adj_factor = 1.0;
@@ -674,9 +674,9 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
- q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
active_best_quality +=
- vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -718,19 +718,19 @@
if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
int qdelta = 0;
- vpx_clear_system_state();
- qdelta = vp10_compute_qdelta_by_rate(
+ aom_clear_system_state();
+ qdelta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
*top_index = active_worst_quality + qdelta;
- *top_index = VPXMAX(*top_index, *bottom_index);
+ *top_index = AOMMAX(*top_index, *bottom_index);
}
// Special case code to try and match quality with forced key frames
if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
- active_worst_quality);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -748,10 +748,10 @@
}
static int get_active_cq_level(const RATE_CONTROL *rc,
- const VP10EncoderConfig *const oxcf) {
+ const AV1EncoderConfig *const oxcf) {
static const double cq_adjust_threshold = 0.1;
int active_cq_level = oxcf->cq_level;
- if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
+ if (oxcf->rc_mode == AOM_CQ && rc->total_target_bits > 0) {
const double x = (double)rc->total_actual_bits / rc->total_target_bits;
if (x < cq_adjust_threshold) {
active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold);
@@ -760,12 +760,12 @@
return active_cq_level;
}
-static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_vbr(const AV1_COMP *cpi,
int *bottom_index,
int *top_index) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const int cq_level = get_active_cq_level(rc, oxcf);
int active_best_quality;
int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi);
@@ -774,17 +774,17 @@
ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
if (frame_is_intra_only(cm)) {
- if (oxcf->rc_mode == VPX_Q) {
+ if (oxcf->rc_mode == AOM_Q) {
int qindex = cq_level;
- double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ int delta_qindex = av1_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
+ active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
} else if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
- double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(
+ double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ int delta_qindex = av1_compute_qdelta(
rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
} else {
// not first frame of one pass and kf_boost is set
double q_adj_factor = 1.0;
@@ -800,9 +800,9 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
- q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
active_best_quality +=
- vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -816,7 +816,7 @@
q = rc->avg_frame_qindex[KEY_FRAME];
}
// For constrained quality, don't allow Q less than the cq level
- if (oxcf->rc_mode == VPX_CQ) {
+ if (oxcf->rc_mode == AOM_CQ) {
if (q < cq_level) q = cq_level;
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -824,28 +824,28 @@
// Constrained quality use slightly lower active best.
active_best_quality = active_best_quality * 15 / 16;
- } else if (oxcf->rc_mode == VPX_Q) {
+ } else if (oxcf->rc_mode == AOM_Q) {
int qindex = cq_level;
- double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+ double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
int delta_qindex;
if (cpi->refresh_alt_ref_frame)
- delta_qindex = vp10_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
else
- delta_qindex = vp10_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ delta_qindex = av1_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
+ active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
} else {
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
}
} else {
- if (oxcf->rc_mode == VPX_Q) {
+ if (oxcf->rc_mode == AOM_Q) {
int qindex = cq_level;
- double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+ double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
0.70, 1.0, 0.85, 1.0 };
- int delta_qindex = vp10_compute_qdelta(
+ int delta_qindex = av1_compute_qdelta(
rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
} else {
// Use the lower of active_worst_quality and recent/average Q.
if (cm->current_video_frame > 1)
@@ -854,7 +854,7 @@
active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]];
// For the constrained quality mode we don't want
// q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+ if ((oxcf->rc_mode == AOM_CQ) && (active_best_quality < cq_level)) {
active_best_quality = cq_level;
}
}
@@ -872,28 +872,28 @@
// Limit Q range for the adaptive loop.
{
int qdelta = 0;
- vpx_clear_system_state();
+ aom_clear_system_state();
if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
- qdelta = vp10_compute_qdelta_by_rate(
+ qdelta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
- qdelta = vp10_compute_qdelta_by_rate(
+ qdelta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
}
*top_index = active_worst_quality + qdelta;
- *top_index = VPXMAX(*top_index, *bottom_index);
+ *top_index = AOMMAX(*top_index, *bottom_index);
}
- if (oxcf->rc_mode == VPX_Q) {
+ if (oxcf->rc_mode == AOM_Q) {
q = active_best_quality;
// Special case code to try and match quality with forced key frames
} else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
- active_worst_quality);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -910,7 +910,7 @@
return q;
}
-int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
+int av1_frame_type_qdelta(const AV1_COMP *cpi, int rf_level, int q) {
static const double rate_factor_deltas[RATE_FACTOR_LEVELS] = {
1.00, // INTER_NORMAL
#if CONFIG_EXT_REFS
@@ -931,19 +931,19 @@
#else
{ INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME };
#endif // CONFIG_EXT_REFS
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
int qdelta =
- vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
- rate_factor_deltas[rf_level], cm->bit_depth);
+ av1_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+ rate_factor_deltas[rf_level], cm->bit_depth);
return qdelta;
}
#define STATIC_MOTION_THRESH 95
-static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
- int *bottom_index, int *top_index) {
- const VP10_COMMON *const cm = &cpi->common;
+static int rc_pick_q_and_bounds_two_pass(const AV1_COMP *cpi, int *bottom_index,
+ int *top_index) {
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const GF_GROUP *gf_group = &cpi->twopass.gf_group;
const int cq_level = get_active_cq_level(rc, oxcf);
int active_best_quality;
@@ -962,19 +962,19 @@
int qindex;
if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
- qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+ qindex = AOMMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
active_best_quality = qindex;
- last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp10_compute_qdelta(
- rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
+ last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(rc, last_boosted_q,
+ last_boosted_q * 1.25, cm->bit_depth);
active_worst_quality =
- VPXMIN(qindex + delta_qindex, active_worst_quality);
+ AOMMIN(qindex + delta_qindex, active_worst_quality);
} else {
qindex = rc->last_boosted_qindex;
- last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp10_compute_qdelta(
- rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(rc, last_boosted_q,
+ last_boosted_q * 0.75, cm->bit_depth);
+ active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
}
} else {
// Not forced keyframe.
@@ -995,9 +995,9 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
- q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
active_best_quality +=
- vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1011,7 +1011,7 @@
q = active_worst_quality;
}
// For constrained quality, don't allow Q less than the cq level
- if (oxcf->rc_mode == VPX_CQ) {
+ if (oxcf->rc_mode == AOM_CQ) {
if (q < cq_level) q = cq_level;
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -1019,13 +1019,13 @@
// Constrained quality use slightly lower active best.
active_best_quality = active_best_quality * 15 / 16;
- } else if (oxcf->rc_mode == VPX_Q) {
+ } else if (oxcf->rc_mode == AOM_Q) {
if (!cpi->refresh_alt_ref_frame) {
active_best_quality = cq_level;
} else {
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
- // Modify best quality for second level arfs. For mode VPX_Q this
+ // Modify best quality for second level arfs. For mode AOM_Q this
// becomes the baseline frame q.
if (gf_group->rf_level[gf_group->index] == GF_ARF_LOW)
active_best_quality = (active_best_quality + cq_level + 1) / 2;
@@ -1034,14 +1034,14 @@
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
}
} else {
- if (oxcf->rc_mode == VPX_Q) {
+ if (oxcf->rc_mode == AOM_Q) {
active_best_quality = cq_level;
} else {
active_best_quality = inter_minq[active_worst_quality];
// For the constrained quality mode we don't want
// q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+ if ((oxcf->rc_mode == AOM_CQ) && (active_best_quality < cq_level)) {
active_best_quality = cq_level;
}
}
@@ -1049,7 +1049,7 @@
// Extension to max or min Q if undershoot or overshoot is outside
// the permitted range.
- if ((cpi->oxcf.rc_mode != VPX_Q) &&
+ if ((cpi->oxcf.rc_mode != AOM_Q) &&
(cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD)) {
if (frame_is_intra_only(cm) ||
(!rc->is_src_frame_alt_ref &&
@@ -1064,22 +1064,22 @@
}
}
- vpx_clear_system_state();
+ aom_clear_system_state();
// Q restrictions for static forced key frames are dealt with elsewhere.
if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
(cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
- int qdelta = vp10_frame_type_qdelta(
- cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
+ int qdelta = av1_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index],
+ active_worst_quality);
active_worst_quality =
- VPXMAX(active_worst_quality + qdelta, active_best_quality);
+ AOMMAX(active_worst_quality + qdelta, active_best_quality);
}
// Modify active_best_quality for downscaled normal frames.
if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
- int qdelta = vp10_compute_qdelta_by_rate(
+ int qdelta = av1_compute_qdelta_by_rate(
rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
active_best_quality =
- VPXMAX(active_best_quality + qdelta, rc->best_quality);
+ AOMMAX(active_best_quality + qdelta, rc->best_quality);
}
active_best_quality =
@@ -1087,19 +1087,19 @@
active_worst_quality =
clamp(active_worst_quality, active_best_quality, rc->worst_quality);
- if (oxcf->rc_mode == VPX_Q) {
+ if (oxcf->rc_mode == AOM_Q) {
q = active_best_quality;
// Special case code to try and match quality with forced key frames.
} else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
// If static since last kf use better of last boosted and last kf q.
if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
- q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+ q = AOMMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
} else {
q = rc->last_boosted_qindex;
}
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
- active_worst_quality);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > active_worst_quality) {
// Special case when we are targeting the max allowed rate.
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -1120,11 +1120,11 @@
return q;
}
-int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
- int *top_index) {
+int av1_rc_pick_q_and_bounds(const AV1_COMP *cpi, int *bottom_index,
+ int *top_index) {
int q;
if (cpi->oxcf.pass == 0) {
- if (cpi->oxcf.rc_mode == VPX_CBR)
+ if (cpi->oxcf.rc_mode == AOM_CBR)
q = rc_pick_q_and_bounds_one_pass_cbr(cpi, bottom_index, top_index);
else
q = rc_pick_q_and_bounds_one_pass_vbr(cpi, bottom_index, top_index);
@@ -1135,24 +1135,24 @@
return q;
}
-void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
- int *frame_under_shoot_limit,
- int *frame_over_shoot_limit) {
- if (cpi->oxcf.rc_mode == VPX_Q) {
+void av1_rc_compute_frame_size_bounds(const AV1_COMP *cpi, int frame_target,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit) {
+ if (cpi->oxcf.rc_mode == AOM_Q) {
*frame_under_shoot_limit = 0;
*frame_over_shoot_limit = INT_MAX;
} else {
// For very small rate targets where the fractional adjustment
// may be tiny, make sure there is at least a minimum range.
const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100;
- *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0);
+ *frame_under_shoot_limit = AOMMAX(frame_target - tolerance - 200, 0);
*frame_over_shoot_limit =
- VPXMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
+ AOMMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
}
}
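A worked example of the non-AOM_Q branch, assuming recode_tolerance is 25
(percent) and the frame target is 10000 bits:

    /* tolerance = 25 * 10000 / 100 = 2500
     * under     = AOMMAX(10000 - 2500 - 200, 0) = 7300
     * over      = AOMMIN(10000 + 2500 + 200, max_frame_bandwidth) = 12700
     *             (unless capped by max_frame_bandwidth) */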
-void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_set_frame_target(AV1_COMP *cpi, int target) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
rc->this_frame_target = target;
@@ -1168,7 +1168,7 @@
((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
}
-static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
+static void update_alt_ref_frame_stats(AV1_COMP *cpi) {
// refreshing this frame means the next frames don't, unless specified by user
RATE_CONTROL *const rc = &cpi->rc;
rc->frames_since_golden = 0;
@@ -1180,7 +1180,7 @@
rc->source_alt_ref_active = 1;
}
-static void update_golden_frame_stats(VP10_COMP *cpi) {
+static void update_golden_frame_stats(AV1_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
#if CONFIG_EXT_REFS
@@ -1221,21 +1221,21 @@
}
}
-void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
- const VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_postencode_update(AV1_COMP *cpi, uint64_t bytes_used) {
+ const AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
const int qindex = cm->base_qindex;
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
- vp10_cyclic_refresh_postencode(cpi);
+ av1_cyclic_refresh_postencode(cpi);
}
// Update rate control heuristics
rc->projected_frame_size = (int)(bytes_used << 3);
// Post encode loop adjustment of Q prediction.
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
// Keep a record of last Q and ambient average Q.
if (cm->frame_type == KEY_FRAME) {
@@ -1249,7 +1249,7 @@
rc->avg_frame_qindex[INTER_FRAME] =
ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
rc->ni_frames++;
- rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+ rc->tot_q += av1_convert_qindex_to_q(qindex, cm->bit_depth);
rc->avg_q = rc->tot_q / rc->ni_frames;
// Calculate the average Q for normal inter frames (not key or GFU
// frames).
@@ -1324,7 +1324,7 @@
}
}
-void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
+void av1_rc_postencode_update_drop_frame(AV1_COMP *cpi) {
// Update buffer level with zero size, update frame counters, and return.
update_buffer_level(cpi, 0);
cpi->rc.frames_since_key++;
@@ -1336,7 +1336,7 @@
// Use this macro to turn on/off use of alt-refs in one-pass mode.
#define USE_ALTREF_FOR_ONE_PASS 1
-static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_pframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
static const int af_ratio = 10;
const RATE_CONTROL *const rc = &cpi->rc;
int target;
@@ -1351,18 +1351,18 @@
#else
target = rc->avg_frame_bandwidth;
#endif
- return vp10_rc_clamp_pframe_target_size(cpi, target);
+ return av1_rc_clamp_pframe_target_size(cpi, target);
}
-static int calc_iframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_iframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
static const int kf_ratio = 25;
const RATE_CONTROL *rc = &cpi->rc;
const int target = rc->avg_frame_bandwidth * kf_ratio;
- return vp10_rc_clamp_iframe_target_size(cpi, target);
+ return av1_rc_clamp_iframe_target_size(cpi, target);
}
-void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_vbr_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1396,16 +1396,16 @@
target = calc_iframe_target_size_one_pass_vbr(cpi);
else
target = calc_pframe_target_size_one_pass_vbr(cpi);
- vp10_rc_set_frame_target(cpi, target);
+ av1_rc_set_frame_target(cpi, target);
}
-static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static int calc_pframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
const RATE_CONTROL *rc = &cpi->rc;
const int64_t diff = rc->optimal_buffer_level - rc->buffer_level;
const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100;
int min_frame_target =
- VPXMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS);
+ AOMMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS);
int target;
if (oxcf->gf_cbr_boost_pct) {
@@ -1422,23 +1422,23 @@
if (diff > 0) {
// Lower the target bandwidth for this frame.
- const int pct_low = (int)VPXMIN(diff / one_pct_bits, oxcf->under_shoot_pct);
+ const int pct_low = (int)AOMMIN(diff / one_pct_bits, oxcf->under_shoot_pct);
target -= (target * pct_low) / 200;
} else if (diff < 0) {
// Increase the target bandwidth for this frame.
const int pct_high =
- (int)VPXMIN(-diff / one_pct_bits, oxcf->over_shoot_pct);
+ (int)AOMMIN(-diff / one_pct_bits, oxcf->over_shoot_pct);
target += (target * pct_high) / 200;
}
if (oxcf->rc_max_inter_bitrate_pct) {
const int max_rate =
rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
- target = VPXMIN(target, max_rate);
+ target = AOMMIN(target, max_rate);
}
- return VPXMAX(min_frame_target, target);
+ return AOMMAX(min_frame_target, target);
}
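A worked example of the buffer-driven adjustment above: with the buffer 10%
below optimal (diff > 0) and under_shoot_pct = 50,

    /* pct_low = AOMMIN(10, 50) = 10, so target -= target * 10 / 200,
     * i.e. a 5% cut; the symmetric branch raises target when diff < 0. */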
-static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_iframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
const RATE_CONTROL *rc = &cpi->rc;
int target;
if (cpi->common.current_video_frame == 0) {
@@ -1449,17 +1449,17 @@
int kf_boost = 32;
double framerate = cpi->framerate;
- kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16));
+ kf_boost = AOMMAX(kf_boost, (int)(2 * framerate - 16));
if (rc->frames_since_key < framerate / 2) {
kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2));
}
target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
}
- return vp10_rc_clamp_iframe_target_size(cpi, target);
+ return av1_rc_clamp_iframe_target_size(cpi, target);
}
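For instance, at 30 fps and well past the last key frame:
kf_boost = AOMMAX(32, 2 * 30 - 16) = 44, so the key-frame target is
(16 + 44) / 16 = 3.75x the average frame bandwidth, before the iframe clamp:

    /* target = ((16 + 44) * rc->avg_frame_bandwidth) >> 4; */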
-void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_cbr_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1476,7 +1476,7 @@
}
if (rc->frames_till_gf_update_due == 0) {
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
- vp10_cyclic_refresh_set_golden_update(cpi);
+ av1_cyclic_refresh_set_golden_update(cpi);
else
rc->baseline_gf_interval =
(rc->min_gf_interval + rc->max_gf_interval) / 2;
@@ -1491,22 +1491,22 @@
// Any update/change of global cyclic refresh parameters (amount/delta-qp)
// should be done here, before the frame qp is selected.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
- vp10_cyclic_refresh_update_parameters(cpi);
+ av1_cyclic_refresh_update_parameters(cpi);
if (cm->frame_type == KEY_FRAME)
target = calc_iframe_target_size_one_pass_cbr(cpi);
else
target = calc_pframe_target_size_one_pass_cbr(cpi);
- vp10_rc_set_frame_target(cpi, target);
+ av1_rc_set_frame_target(cpi, target);
if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC)
- cpi->resize_pending = vp10_resize_one_pass_cbr(cpi);
+ cpi->resize_pending = av1_resize_one_pass_cbr(cpi);
else
cpi->resize_pending = 0;
}
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
- vpx_bit_depth_t bit_depth) {
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+ aom_bit_depth_t bit_depth) {
int start_index = rc->worst_quality;
int target_index = rc->worst_quality;
int i;
@@ -1514,34 +1514,34 @@
// Convert the average q value to an index.
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
start_index = i;
- if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
+ if (av1_convert_qindex_to_q(i, bit_depth) >= qstart) break;
}
// Convert the q target to an index
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
target_index = i;
- if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
+ if (av1_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
}
return target_index - start_index;
}
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
- int qindex, double rate_target_ratio,
- vpx_bit_depth_t bit_depth) {
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+ int qindex, double rate_target_ratio,
+ aom_bit_depth_t bit_depth) {
int target_index = rc->worst_quality;
int i;
// Look up the current projected bits per block for the base index
const int base_bits_per_mb =
- vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
+ av1_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
// Find the target bits per mb based on the base value and given ratio.
const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
// Convert the q target to an index
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
- if (vp10_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
+ if (av1_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
target_bits_per_mb) {
target_index = i;
break;
@@ -1550,12 +1550,12 @@
return target_index - qindex;
}
-void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
- RATE_CONTROL *const rc) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_set_gf_interval_range(const AV1_COMP *const cpi,
+ RATE_CONTROL *const rc) {
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
// Special case code for 1 pass fixed Q mode tests
- if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
+ if ((oxcf->pass == 0) && (oxcf->rc_mode == AOM_Q)) {
rc->max_gf_interval = FIXED_GF_INTERVAL;
rc->min_gf_interval = FIXED_GF_INTERVAL;
rc->static_scene_max_gf_interval = FIXED_GF_INTERVAL;
@@ -1564,10 +1564,10 @@
rc->max_gf_interval = oxcf->max_gf_interval;
rc->min_gf_interval = oxcf->min_gf_interval;
if (rc->min_gf_interval == 0)
- rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+ rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
oxcf->width, oxcf->height, cpi->framerate);
if (rc->max_gf_interval == 0)
- rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+ rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
cpi->framerate, rc->min_gf_interval);
// Extended interval for genuinely static scenes
@@ -1582,13 +1582,13 @@
rc->max_gf_interval = rc->static_scene_max_gf_interval;
// Clamp min to max
- rc->min_gf_interval = VPXMIN(rc->min_gf_interval, rc->max_gf_interval);
+ rc->min_gf_interval = AOMMIN(rc->min_gf_interval, rc->max_gf_interval);
}
}
-void vp10_rc_update_framerate(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_update_framerate(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
int vbr_max_bits;
@@ -1597,7 +1597,7 @@
(int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
rc->min_frame_bandwidth =
- VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
+ AOMMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
// A maximum bitrate for a frame is defined.
// The baseline for this aligns with HW implementations that
@@ -1610,14 +1610,14 @@
(int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) /
100);
rc->max_frame_bandwidth =
- VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
+ AOMMAX(AOMMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
- vp10_rc_set_gf_interval_range(cpi, rc);
+ av1_rc_set_gf_interval_range(cpi, rc);
}
#define VBR_PCT_ADJUSTMENT_LIMIT 50
// For VBR...adjustment to the frame target based on error from previous frames
-static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
+static void vbr_rate_correction(AV1_COMP *cpi, int *this_frame_target) {
RATE_CONTROL *const rc = &cpi->rc;
int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
int max_delta;
@@ -1648,31 +1648,31 @@
// Don't do it for kf, arf, gf or overlay frames.
if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref &&
rc->vbr_bits_off_target_fast) {
- int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target);
+ int one_frame_bits = AOMMAX(rc->avg_frame_bandwidth, *this_frame_target);
int fast_extra_bits;
- fast_extra_bits = (int)VPXMIN(rc->vbr_bits_off_target_fast, one_frame_bits);
- fast_extra_bits = (int)VPXMIN(
+ fast_extra_bits = (int)AOMMIN(rc->vbr_bits_off_target_fast, one_frame_bits);
+ fast_extra_bits = (int)AOMMIN(
fast_extra_bits,
- VPXMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8));
+ AOMMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8));
*this_frame_target += (int)fast_extra_bits;
rc->vbr_bits_off_target_fast -= fast_extra_bits;
}
}
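Worked numbers for the fast-recovery clamp above: with one_frame_bits = 40000
and vbr_bits_off_target_fast = 100000,

    /* first min:  AOMMIN(100000, 40000) = 40000
     * second min: AOMMIN(40000, AOMMAX(40000 / 8, 100000 / 8)) = 12500,
     * so 12500 extra bits are granted this frame and deducted from the
     * fast-recovery pool. */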
-void vp10_set_target_rate(VP10_COMP *cpi) {
+void av1_set_target_rate(AV1_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
int target_rate = rc->base_frame_target;
// Correction to rate target based on prior over or under shoot.
- if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ)
+ if (cpi->oxcf.rc_mode == AOM_VBR || cpi->oxcf.rc_mode == AOM_CQ)
vbr_rate_correction(cpi, &target_rate);
- vp10_rc_set_frame_target(cpi, target_rate);
+ av1_rc_set_frame_target(cpi, target_rate);
}
// Check if we should resize, based on average QP from past x frames.
// Only allow for resize at most one scale down for now, scaling factor is 2.
-int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+int av1_resize_one_pass_cbr(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int resize_now = 0;
cpi->resize_scale_num = 1;
@@ -1731,15 +1731,15 @@
rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi);
// Reset cyclic refresh parameters.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
- vp10_cyclic_refresh_reset_resize(cpi);
+ av1_cyclic_refresh_reset_resize(cpi);
// Get the projected qindex, based on the scaled target frame size (scaled
- // so target_bits_per_mb in vp10_rc_regulate_q will be correct target).
+ // so target_bits_per_mb in av1_rc_regulate_q will be correct target).
target_bits_per_frame = (resize_now == 1)
? rc->this_frame_target * tot_scale_change
: rc->this_frame_target / tot_scale_change;
active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
- qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
- active_worst_quality);
+ qindex = av1_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+ active_worst_quality);
// If resize is down, check if projected q index is close to worst_quality,
// and if so, reduce the rate correction factor (since we can likely afford
// a lower q for the resized frame).
diff --git a/av1/encoder/ratectrl.h b/av1/encoder/ratectrl.h
index 88a14bc..b690918 100644
--- a/av1/encoder/ratectrl.h
+++ b/av1/encoder/ratectrl.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_RATECTRL_H_
-#define VP10_ENCODER_RATECTRL_H_
+#ifndef AV1_ENCODER_RATECTRL_H_
+#define AV1_ENCODER_RATECTRL_H_
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
#include "av1/common/blockd.h"
@@ -168,118 +168,116 @@
int rf_level_maxq[RATE_FACTOR_LEVELS];
} RATE_CONTROL;
-struct VP10_COMP;
-struct VP10EncoderConfig;
+struct AV1_COMP;
+struct AV1EncoderConfig;
-void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
- RATE_CONTROL *rc);
+void av1_rc_init(const struct AV1EncoderConfig *oxcf, int pass,
+ RATE_CONTROL *rc);
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
- double correction_factor,
- vpx_bit_depth_t bit_depth);
+int av1_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
+ double correction_factor, aom_bit_depth_t bit_depth);
-double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth);
-void vp10_rc_init_minq_luts(void);
+void av1_rc_init_minq_luts(void);
-int vp10_rc_get_default_min_gf_interval(int width, int height,
- double framerate);
-// Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
+int av1_rc_get_default_min_gf_interval(int width, int height, double framerate);
+// Note av1_rc_get_default_max_gf_interval() requires the min_gf_interval to
// be passed in to ensure that the max_gf_interval returned is at least as big
// as that.
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
+int av1_rc_get_default_max_gf_interval(double framerate, int min_gf_interval);
// Generally at the high level, the following flow is expected
// to be enforced for rate control:
// First call per frame, one of:
-// vp10_rc_get_one_pass_vbr_params()
-// vp10_rc_get_one_pass_cbr_params()
-// vp10_rc_get_first_pass_params()
-// vp10_rc_get_second_pass_params()
+// av1_rc_get_one_pass_vbr_params()
+// av1_rc_get_one_pass_cbr_params()
+// av1_rc_get_first_pass_params()
+// av1_rc_get_second_pass_params()
// depending on the usage to set the rate control encode parameters desired.
//
// Then, call encode_frame_to_data_rate() to perform the
// actual encode. This function will in turn call encode_frame()
// one or more times, followed by one of:
-// vp10_rc_postencode_update()
-// vp10_rc_postencode_update_drop_frame()
+// av1_rc_postencode_update()
+// av1_rc_postencode_update_drop_frame()
//
// The majority of rate control parameters are only expected
-// to be set in the vp10_rc_get_..._params() functions and
-// updated during the vp10_rc_postencode_update...() functions.
-// The only exceptions are vp10_rc_drop_frame() and
-// vp10_rc_update_rate_correction_factors() functions.
+// to be set in the av1_rc_get_..._params() functions and
+// updated during the av1_rc_postencode_update...() functions.
+// The only exceptions are av1_rc_drop_frame() and
+// av1_rc_update_rate_correction_factors() functions.
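A condensed sketch of that per-frame contract for the one-pass CBR path
(encoder internals elided; frame_was_dropped is a hypothetical flag standing
in for the drop decision):

    av1_rc_get_one_pass_cbr_params(cpi);  /* set targets before encoding */
    /* ... encode_frame_to_data_rate() -> encode_frame() ... */
    if (frame_was_dropped)
      av1_rc_postencode_update_drop_frame(cpi);
    else
      av1_rc_postencode_update(cpi, bytes_used);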
// Functions to set parameters for encoding before the actual
// encode_frame_to_data_rate() function.
-void vp10_rc_get_one_pass_vbr_params(struct VP10_COMP *cpi);
-void vp10_rc_get_one_pass_cbr_params(struct VP10_COMP *cpi);
+void av1_rc_get_one_pass_vbr_params(struct AV1_COMP *cpi);
+void av1_rc_get_one_pass_cbr_params(struct AV1_COMP *cpi);
// Post encode update of the rate control parameters based
// on bytes used
-void vp10_rc_postencode_update(struct VP10_COMP *cpi, uint64_t bytes_used);
+void av1_rc_postencode_update(struct AV1_COMP *cpi, uint64_t bytes_used);
// Post encode update of the rate control parameters for dropped frames
-void vp10_rc_postencode_update_drop_frame(struct VP10_COMP *cpi);
+void av1_rc_postencode_update_drop_frame(struct AV1_COMP *cpi);
// Updates rate correction factors
// Changes only the rate correction factors in the rate control structure.
-void vp10_rc_update_rate_correction_factors(struct VP10_COMP *cpi);
+void av1_rc_update_rate_correction_factors(struct AV1_COMP *cpi);
// Decide if we should drop this frame: For 1-pass CBR.
// Changes only the decimation count in the rate control structure
-int vp10_rc_drop_frame(struct VP10_COMP *cpi);
+int av1_rc_drop_frame(struct AV1_COMP *cpi);
// Computes frame size bounds.
-void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
- int this_frame_target,
- int *frame_under_shoot_limit,
- int *frame_over_shoot_limit);
+void av1_rc_compute_frame_size_bounds(const struct AV1_COMP *cpi,
+ int this_frame_target,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit);
// Picks q and q bounds given the target for bits
-int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
- int *top_index);
+int av1_rc_pick_q_and_bounds(const struct AV1_COMP *cpi, int *bottom_index,
+ int *top_index);
// Estimates q to achieve a target bits per frame
-int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
- int active_best_quality, int active_worst_quality);
+int av1_rc_regulate_q(const struct AV1_COMP *cpi, int target_bits_per_frame,
+ int active_best_quality, int active_worst_quality);
// Estimates bits per mb for a given qindex and correction factor.
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
- double correction_factor, vpx_bit_depth_t bit_depth);
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+ double correction_factor, aom_bit_depth_t bit_depth);
// Clamping utilities for bitrate targets for iframes and pframes.
-int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
- int target);
-int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
- int target);
+int av1_rc_clamp_iframe_target_size(const struct AV1_COMP *const cpi,
+ int target);
+int av1_rc_clamp_pframe_target_size(const struct AV1_COMP *const cpi,
+ int target);
// Utility to set frame_target into the RATE_CONTROL structure
-// This function is called only from the vp10_rc_get_..._params() functions.
-void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
+// This function is called only from the av1_rc_get_..._params() functions.
+void av1_rc_set_frame_target(struct AV1_COMP *cpi, int target);
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a target q value
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
- vpx_bit_depth_t bit_depth);
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+ aom_bit_depth_t bit_depth);
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a value that should equate to the given rate ratio.
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
- int qindex, double rate_target_ratio,
- vpx_bit_depth_t bit_depth);
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+ int qindex, double rate_target_ratio,
+ aom_bit_depth_t bit_depth);
-int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
+int av1_frame_type_qdelta(const struct AV1_COMP *cpi, int rf_level, int q);
-void vp10_rc_update_framerate(struct VP10_COMP *cpi);
+void av1_rc_update_framerate(struct AV1_COMP *cpi);
-void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
- RATE_CONTROL *const rc);
+void av1_rc_set_gf_interval_range(const struct AV1_COMP *const cpi,
+ RATE_CONTROL *const rc);
-void vp10_set_target_rate(struct VP10_COMP *cpi);
+void av1_set_target_rate(struct AV1_COMP *cpi);
-int vp10_resize_one_pass_cbr(struct VP10_COMP *cpi);
+int av1_resize_one_pass_cbr(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RATECTRL_H_
+#endif // AV1_ENCODER_RATECTRL_H_
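As a usage note for the qdelta helpers declared above, a sketch assuming cpi
and cm are in scope as elsewhere in the encoder (values illustrative; the
results depend on the qindex-to-Q tables):

    /* qindex steps needed to move Q from ~40.0 down to ~30.0 at 8 bits
     * (negative, since the target Q is lower than the start): */
    int dq = av1_compute_qdelta(&cpi->rc, 40.0, 30.0, AOM_BITS_8);
    /* a delta sized to halve the projected bits per MB: */
    int dq_rate = av1_compute_qdelta_by_rate(&cpi->rc, INTER_FRAME,
                                             cm->base_qindex, 0.5, cm->bit_depth);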
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index a8a8691..2379db0 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -12,10 +12,10 @@
#include <math.h>
#include <stdio.h>
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/bitops.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
@@ -45,13 +45,13 @@
// Factor to weigh the rate for switchable interp filters.
#define SWITCHABLE_INTERP_RATE_FACTOR 1
-void vp10_rd_cost_reset(RD_COST *rd_cost) {
+void av1_rd_cost_reset(RD_COST *rd_cost) {
rd_cost->rate = INT_MAX;
rd_cost->dist = INT64_MAX;
rd_cost->rdcost = INT64_MAX;
}
-void vp10_rd_cost_init(RD_COST *rd_cost) {
+void av1_rd_cost_init(RD_COST *rd_cost) {
rd_cost->rate = 0;
rd_cost->dist = 0;
rd_cost->rdcost = 0;
@@ -68,94 +68,90 @@
#endif // CONFIG_EXT_PARTITION
};
-static void fill_mode_costs(VP10_COMP *cpi) {
+static void fill_mode_costs(AV1_COMP *cpi) {
const FRAME_CONTEXT *const fc = cpi->common.fc;
int i, j;
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j)
- vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
- vp10_intra_mode_tree);
+ av1_cost_tokens(cpi->y_mode_costs[i][j], av1_kf_y_mode_prob[i][j],
+ av1_intra_mode_tree);
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
- vp10_cost_tokens(cpi->mbmode_cost[i], fc->y_mode_prob[i],
- vp10_intra_mode_tree);
+ av1_cost_tokens(cpi->mbmode_cost[i], fc->y_mode_prob[i],
+ av1_intra_mode_tree);
for (i = 0; i < INTRA_MODES; ++i)
- vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
- vp10_intra_mode_tree);
+ av1_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+ av1_intra_mode_tree);
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- vp10_cost_tokens(cpi->switchable_interp_costs[i],
- fc->switchable_interp_prob[i],
- vp10_switchable_interp_tree);
+ av1_cost_tokens(cpi->switchable_interp_costs[i],
+ fc->switchable_interp_prob[i], av1_switchable_interp_tree);
for (i = 0; i < PALETTE_BLOCK_SIZES; ++i) {
- vp10_cost_tokens(cpi->palette_y_size_cost[i],
- vp10_default_palette_y_size_prob[i],
- vp10_palette_size_tree);
- vp10_cost_tokens(cpi->palette_uv_size_cost[i],
- vp10_default_palette_uv_size_prob[i],
- vp10_palette_size_tree);
+ av1_cost_tokens(cpi->palette_y_size_cost[i],
+ av1_default_palette_y_size_prob[i], av1_palette_size_tree);
+ av1_cost_tokens(cpi->palette_uv_size_cost[i],
+ av1_default_palette_uv_size_prob[i], av1_palette_size_tree);
}
for (i = 0; i < PALETTE_MAX_SIZE - 1; ++i)
for (j = 0; j < PALETTE_COLOR_CONTEXTS; ++j) {
- vp10_cost_tokens(cpi->palette_y_color_cost[i][j],
- vp10_default_palette_y_color_prob[i][j],
- vp10_palette_color_tree[i]);
- vp10_cost_tokens(cpi->palette_uv_color_cost[i][j],
- vp10_default_palette_uv_color_prob[i][j],
- vp10_palette_color_tree[i]);
+ av1_cost_tokens(cpi->palette_y_color_cost[i][j],
+ av1_default_palette_y_color_prob[i][j],
+ av1_palette_color_tree[i]);
+ av1_cost_tokens(cpi->palette_uv_color_cost[i][j],
+ av1_default_palette_uv_color_prob[i][j],
+ av1_palette_color_tree[i]);
}
for (i = 0; i < TX_SIZES - 1; ++i)
for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
- vp10_cost_tokens(cpi->tx_size_cost[i][j], fc->tx_size_probs[i][j],
- vp10_tx_size_tree[i]);
+ av1_cost_tokens(cpi->tx_size_cost[i][j], fc->tx_size_probs[i][j],
+ av1_tx_size_tree[i]);
#if CONFIG_EXT_TX
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
if (use_inter_ext_tx_for_txsize[s][i]) {
- vp10_cost_tokens(cpi->inter_tx_type_costs[s][i],
- fc->inter_ext_tx_prob[s][i],
- vp10_ext_tx_inter_tree[s]);
+ av1_cost_tokens(cpi->inter_tx_type_costs[s][i],
+ fc->inter_ext_tx_prob[s][i], av1_ext_tx_inter_tree[s]);
}
}
for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
if (use_intra_ext_tx_for_txsize[s][i]) {
for (j = 0; j < INTRA_MODES; ++j)
- vp10_cost_tokens(cpi->intra_tx_type_costs[s][i][j],
- fc->intra_ext_tx_prob[s][i][j],
- vp10_ext_tx_intra_tree[s]);
+ av1_cost_tokens(cpi->intra_tx_type_costs[s][i][j],
+ fc->intra_ext_tx_prob[s][i][j],
+ av1_ext_tx_intra_tree[s]);
}
}
}
#else
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
- fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
+ av1_cost_tokens(cpi->intra_tx_type_costs[i][j],
+ fc->intra_ext_tx_prob[i][j], av1_ext_tx_tree);
}
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
- vp10_ext_tx_tree);
+ av1_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
+ av1_ext_tx_tree);
}
#endif // CONFIG_EXT_TX
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
- vp10_cost_tokens(cpi->intra_filter_cost[i], fc->intra_filter_probs[i],
- vp10_intra_filter_tree);
+ av1_cost_tokens(cpi->intra_filter_cost[i], fc->intra_filter_probs[i],
+ av1_intra_filter_tree);
#endif // CONFIG_EXT_INTRA
}
-void vp10_fill_token_costs(vp10_coeff_cost *c,
+void av1_fill_token_costs(av1_coeff_cost *c,
#if CONFIG_ANS
- coeff_cdf_model (*cdf)[PLANE_TYPES],
+ coeff_cdf_model (*cdf)[PLANE_TYPES],
#endif // CONFIG_ANS
- vp10_coeff_probs_model (*p)[PLANE_TYPES]) {
+ av1_coeff_probs_model (*p)[PLANE_TYPES]) {
int i, j, k, l;
TX_SIZE t;
for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -164,17 +160,17 @@
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
#if CONFIG_ANS
- const vpx_prob *const tree_probs = p[t][i][j][k][l];
- vp10_cost_tokens_ans((int *)c[t][i][j][k][0][l], tree_probs,
- cdf[t][i][j][k][l], 0);
- vp10_cost_tokens_ans((int *)c[t][i][j][k][1][l], tree_probs,
- cdf[t][i][j][k][l], 1);
+ const aom_prob *const tree_probs = p[t][i][j][k][l];
+ av1_cost_tokens_ans((int *)c[t][i][j][k][0][l], tree_probs,
+ cdf[t][i][j][k][l], 0);
+ av1_cost_tokens_ans((int *)c[t][i][j][k][1][l], tree_probs,
+ cdf[t][i][j][k][l], 1);
#else
- vpx_prob probs[ENTROPY_NODES];
- vp10_model_to_full_probs(p[t][i][j][k][l], probs);
- vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
- vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
- vp10_coef_tree);
+ aom_prob probs[ENTROPY_NODES];
+ av1_model_to_full_probs(p[t][i][j][k][l], probs);
+ av1_cost_tokens((int *)c[t][i][j][k][0][l], probs, av1_coef_tree);
+ av1_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+ av1_coef_tree);
#endif // CONFIG_ANS
assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
c[t][i][j][k][1][l][EOB_TOKEN]);
@@ -185,7 +181,7 @@
static int sad_per_bit16lut_8[QINDEX_RANGE];
static int sad_per_bit4lut_8[QINDEX_RANGE];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int sad_per_bit16lut_10[QINDEX_RANGE];
static int sad_per_bit4lut_10[QINDEX_RANGE];
static int sad_per_bit16lut_12[QINDEX_RANGE];
@@ -193,26 +189,26 @@
#endif
static void init_me_luts_bd(int *bit16lut, int *bit4lut, int range,
- vpx_bit_depth_t bit_depth) {
+ aom_bit_depth_t bit_depth) {
int i;
// Initialize the sad lut tables using a formulaic calculation for now.
// This is to make it easier to resolve the impact of experimental changes
// to the quantizer tables.
for (i = 0; i < range; i++) {
- const double q = vp10_convert_qindex_to_q(i, bit_depth);
+ const double q = av1_convert_qindex_to_q(i, bit_depth);
bit16lut[i] = (int)(0.0418 * q + 2.4107);
bit4lut[i] = (int)(0.063 * q + 2.742);
}
}
-void vp10_init_me_luts(void) {
+void av1_init_me_luts(void) {
init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE,
- VPX_BITS_8);
-#if CONFIG_VP9_HIGHBITDEPTH
+ AOM_BITS_8);
+#if CONFIG_AOM_HIGHBITDEPTH
init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE,
- VPX_BITS_10);
+ AOM_BITS_10);
init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE,
- VPX_BITS_12);
+ AOM_BITS_12);
#endif
}
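To make the formulaic LUT above concrete: at q = 32.0 the table entries come out to (int)(0.0418 * 32.0 + 2.4107) = 3 and (int)(0.063 * 32.0 + 2.742) = 4. A standalone sketch with the qindex-to-q conversion factored out:

#include <stdio.h>

// Sketch of the per-q SAD-cost formula in init_me_luts_bd() above.
static void sad_per_bit_from_q(double q, int *bit16, int *bit4) {
  *bit16 = (int)(0.0418 * q + 2.4107);
  *bit4 = (int)(0.063 * q + 2.742);
}

int main(void) {
  int b16, b4;
  sad_per_bit_from_q(32.0, &b16, &b4);
  printf("sad_per_bit16 = %d, sad_per_bit4 = %d\n", b16, b4);  // 3, 4
  return 0;
}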
@@ -230,25 +226,25 @@
#endif // CONFIG_EXT_REFS
};
-int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
- const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
-#if CONFIG_VP9_HIGHBITDEPTH
+int av1_compute_rd_mult(const AV1_COMP *cpi, int qindex) {
+ const int64_t q = av1_dc_quant(qindex, 0, cpi->common.bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
int64_t rdmult = 0;
switch (cpi->common.bit_depth) {
- case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
- case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
- case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
+ case AOM_BITS_8: rdmult = 88 * q * q / 24; break;
+ case AOM_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
+ case AOM_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1;
}
#else
int64_t rdmult = 88 * q * q / 24;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
- const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100));
+ const int boost_index = AOMMIN(15, (cpi->rc.gfu_boost / 100));
rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
@@ -257,57 +253,56 @@
return (int)rdmult;
}
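Worked through for the 8-bit path of av1_compute_rd_mult() above: a DC quantizer step of q = 24 gives a base multiplier of 88 * 24 * 24 / 24 = 2112 before the two-pass boost adjustment. A sketch, assuming av1_dc_quant() returned 24:

#include <stdint.h>

// Sketch of the AOM_BITS_8 branch only; the boost adjustment is omitted.
static int rd_mult_8bit_sketch(int64_t q) {
  return (int)(88 * q * q / 24);  // q = 24 -> 2112
}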
-static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) {
+static int compute_rd_thresh_factor(int qindex, aom_bit_depth_t bit_depth) {
double q;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
- case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
- case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
+ case AOM_BITS_8: q = av1_dc_quant(qindex, 0, AOM_BITS_8) / 4.0; break;
+ case AOM_BITS_10: q = av1_dc_quant(qindex, 0, AOM_BITS_10) / 16.0; break;
+ case AOM_BITS_12: q = av1_dc_quant(qindex, 0, AOM_BITS_12) / 64.0; break;
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1;
}
#else
(void)bit_depth;
- q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ q = av1_dc_quant(qindex, 0, AOM_BITS_8) / 4.0;
+#endif // CONFIG_AOM_HIGHBITDEPTH
// TODO(debargha): Adjust the function below.
- return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
+ return AOMMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
}
-void vp10_initialize_me_consts(const VP10_COMP *cpi, MACROBLOCK *x,
- int qindex) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_initialize_me_consts(const AV1_COMP *cpi, MACROBLOCK *x, int qindex) {
+#if CONFIG_AOM_HIGHBITDEPTH
switch (cpi->common.bit_depth) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
x->sadperbit16 = sad_per_bit16lut_8[qindex];
x->sadperbit4 = sad_per_bit4lut_8[qindex];
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
x->sadperbit16 = sad_per_bit16lut_10[qindex];
x->sadperbit4 = sad_per_bit4lut_10[qindex];
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
x->sadperbit16 = sad_per_bit16lut_12[qindex];
x->sadperbit4 = sad_per_bit4lut_12[qindex];
break;
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
}
#else
(void)cpi;
x->sadperbit16 = sad_per_bit16lut_8[qindex];
x->sadperbit4 = sad_per_bit4lut_8[qindex];
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
-static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
+static void set_block_thresholds(const AV1_COMMON *cm, RD_OPT *rd) {
int i, bsize, segment_id;
for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
const int qindex =
- clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
+ clamp(av1_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
cm->y_dc_delta_q,
0, MAXQ);
const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
@@ -335,10 +330,10 @@
}
#if CONFIG_REF_MV
-void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
+void av1_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[ref_frame],
- mbmi_ext->ref_mv_stack[ref_frame]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[ref_frame],
+ mbmi_ext->ref_mv_stack[ref_frame]);
x->mvcost = x->mv_cost_stack[nmv_ctx];
x->nmvjointcost = x->nmv_vec_cost[nmv_ctx];
x->mvsadcost = x->mvcost;
@@ -349,16 +344,16 @@
}
#endif
-void vp10_initialize_rd_consts(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_initialize_rd_consts(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
RD_OPT *const rd = &cpi->rd;
int i;
- vpx_clear_system_state();
+ aom_clear_system_state();
rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128).
- rd->RDMULT = vp10_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
+ rd->RDMULT = av1_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
set_error_per_bit(x, rd->RDMULT);
@@ -369,10 +364,10 @@
int nmv_ctx;
for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
- vpx_prob tmp_prob = cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO];
+ aom_prob tmp_prob = cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO];
cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO] = 1;
- vp10_build_nmv_cost_table(
+ av1_build_nmv_cost_table(
x->nmv_vec_cost[nmv_ctx],
cm->allow_high_precision_mv ? x->nmvcost_hp[nmv_ctx]
: x->nmvcost[nmv_ctx],
@@ -381,40 +376,40 @@
x->nmv_vec_cost[nmv_ctx][MV_JOINT_ZERO] = 0;
x->zero_rmv_cost[nmv_ctx][0] =
- vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 0);
+ av1_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 0);
x->zero_rmv_cost[nmv_ctx][1] =
- vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 1);
+ av1_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 1);
}
x->mvcost = x->mv_cost_stack[0];
x->nmvjointcost = x->nmv_vec_cost[0];
x->mvsadcost = x->mvcost;
x->nmvjointsadcost = x->nmvjointcost;
#else
- vp10_build_nmv_cost_table(
+ av1_build_nmv_cost_table(
x->nmvjointcost,
cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
cm->allow_high_precision_mv);
#endif
}
if (cpi->oxcf.pass != 1) {
- vp10_fill_token_costs(x->token_costs,
+ av1_fill_token_costs(x->token_costs,
#if CONFIG_ANS
- cm->fc->coef_cdfs,
+ cm->fc->coef_cdfs,
#endif // CONFIG_ANS
- cm->fc->coef_probs);
+ cm->fc->coef_probs);
if (cpi->sf.partition_search_type != VAR_BASED_PARTITION ||
cm->frame_type == KEY_FRAME) {
#if CONFIG_EXT_PARTITION_TYPES
- vp10_cost_tokens(cpi->partition_cost[0], cm->fc->partition_prob[0],
- vp10_partition_tree);
+ av1_cost_tokens(cpi->partition_cost[0], cm->fc->partition_prob[0],
+ av1_partition_tree);
for (i = 1; i < PARTITION_CONTEXTS; ++i)
- vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
- vp10_ext_partition_tree);
+ av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+ av1_ext_partition_tree);
#else
for (i = 0; i < PARTITION_CONTEXTS; ++i)
- vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
- vp10_partition_tree);
+ av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+ av1_partition_tree);
#endif // CONFIG_EXT_PARTITION_TYPES
}
@@ -423,47 +418,47 @@
if (!frame_is_intra_only(cm)) {
#if CONFIG_REF_MV
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i) {
- cpi->newmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->newmv_prob[i], 0);
- cpi->newmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->newmv_prob[i], 1);
+ cpi->newmv_mode_cost[i][0] = av1_cost_bit(cm->fc->newmv_prob[i], 0);
+ cpi->newmv_mode_cost[i][1] = av1_cost_bit(cm->fc->newmv_prob[i], 1);
}
for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i) {
- cpi->zeromv_mode_cost[i][0] = vp10_cost_bit(cm->fc->zeromv_prob[i], 0);
- cpi->zeromv_mode_cost[i][1] = vp10_cost_bit(cm->fc->zeromv_prob[i], 1);
+ cpi->zeromv_mode_cost[i][0] = av1_cost_bit(cm->fc->zeromv_prob[i], 0);
+ cpi->zeromv_mode_cost[i][1] = av1_cost_bit(cm->fc->zeromv_prob[i], 1);
}
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i) {
- cpi->refmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->refmv_prob[i], 0);
- cpi->refmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->refmv_prob[i], 1);
+ cpi->refmv_mode_cost[i][0] = av1_cost_bit(cm->fc->refmv_prob[i], 0);
+ cpi->refmv_mode_cost[i][1] = av1_cost_bit(cm->fc->refmv_prob[i], 1);
}
for (i = 0; i < DRL_MODE_CONTEXTS; ++i) {
- cpi->drl_mode_cost0[i][0] = vp10_cost_bit(cm->fc->drl_prob[i], 0);
- cpi->drl_mode_cost0[i][1] = vp10_cost_bit(cm->fc->drl_prob[i], 1);
+ cpi->drl_mode_cost0[i][0] = av1_cost_bit(cm->fc->drl_prob[i], 0);
+ cpi->drl_mode_cost0[i][1] = av1_cost_bit(cm->fc->drl_prob[i], 1);
}
#if CONFIG_EXT_INTER
- cpi->new2mv_mode_cost[0] = vp10_cost_bit(cm->fc->new2mv_prob, 0);
- cpi->new2mv_mode_cost[1] = vp10_cost_bit(cm->fc->new2mv_prob, 1);
+ cpi->new2mv_mode_cost[0] = av1_cost_bit(cm->fc->new2mv_prob, 0);
+ cpi->new2mv_mode_cost[1] = av1_cost_bit(cm->fc->new2mv_prob, 1);
#endif // CONFIG_EXT_INTER
#else
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
- cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+ av1_cost_tokens((int *)cpi->inter_mode_cost[i],
+ cm->fc->inter_mode_probs[i], av1_inter_mode_tree);
#endif // CONFIG_REF_MV
#if CONFIG_EXT_INTER
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- vp10_cost_tokens((int *)cpi->inter_compound_mode_cost[i],
- cm->fc->inter_compound_mode_probs[i],
- vp10_inter_compound_mode_tree);
+ av1_cost_tokens((int *)cpi->inter_compound_mode_cost[i],
+ cm->fc->inter_compound_mode_probs[i],
+ av1_inter_compound_mode_tree);
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
- vp10_cost_tokens((int *)cpi->interintra_mode_cost[i],
- cm->fc->interintra_mode_prob[i],
- vp10_interintra_mode_tree);
+ av1_cost_tokens((int *)cpi->interintra_mode_cost[i],
+ cm->fc->interintra_mode_prob[i],
+ av1_interintra_mode_tree);
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES; i++) {
- vp10_cost_tokens((int *)cpi->motvar_cost[i], cm->fc->motvar_prob[i],
- vp10_motvar_tree);
+ av1_cost_tokens((int *)cpi->motvar_cost[i], cm->fc->motvar_prob[i],
+ av1_motvar_tree);
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
}
@@ -536,9 +531,9 @@
*d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
}
-void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n_log2,
- unsigned int qstep, int *rate,
- int64_t *dist) {
+void av1_model_rd_from_var_lapndz(int64_t var, unsigned int n_log2,
+ unsigned int qstep, int *rate,
+ int64_t *dist) {
// This function models the rate and distortion for a Laplacian
// source with given variance when quantized with a uniform quantizer
// with given stepsize. The closed form expressions are in:
@@ -553,9 +548,9 @@
static const uint32_t MAX_XSQ_Q10 = 245727;
const uint64_t xsq_q10_64 =
(((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var;
- const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10);
+ const int xsq_q10 = (int)AOMMIN(xsq_q10_64, MAX_XSQ_Q10);
model_rd_norm(xsq_q10, &r_q10, &d_q10);
- *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - VP10_PROB_COST_SHIFT);
+ *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - AV1_PROB_COST_SHIFT);
*dist = (var * (int64_t)d_q10 + 512) >> 10;
}
}
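Numerically: with var = 1024, qstep = 16 and n_log2 = 4 (a 16-pixel block), the Q10 ratio above is ((16 * 16 << 14) + 512) / 1024 = 4096, i.e. x = qstep * qstep * n / var = 4.0 in Q10, well below MAX_XSQ_Q10 = 245727, so no clamping occurs before the model_rd_norm() lookup.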
@@ -633,16 +628,16 @@
}
}
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
- const struct macroblockd_plane *pd,
- ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
- ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]) {
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+ const struct macroblockd_plane *pd,
+ ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+ ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]) {
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
get_entropy_contexts_plane(plane_bsize, tx_size, pd, t_above, t_left);
}
-void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
- int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
+void av1_mv_pred(AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+ int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
int i;
int zero_seen = 0;
int best_index = 0;
@@ -672,7 +667,7 @@
if (i == 1 && near_same_nearest) continue;
fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3;
fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3;
- max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
+ max_mv = AOMMAX(max_mv, AOMMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
if (fp_row == 0 && fp_col == 0 && zero_seen) continue;
zero_seen |= (fp_row == 0 && fp_col == 0);
@@ -694,11 +689,11 @@
x->pred_mv_sad[ref_frame] = best_sad;
}
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
- struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src, int mi_row,
- int mi_col, const struct scale_factors *scale,
- const struct scale_factors *scale_uv) {
+void av1_setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
int i;
dst[0].buf = src->y_buffer;
@@ -716,23 +711,23 @@
}
}
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
- int stride) {
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+ int stride) {
const int bw = b_width_log2_lookup[plane_bsize];
const int y = 4 * (raster_block >> bw);
const int x = 4 * (raster_block & ((1 << bw) - 1));
return y * stride + x;
}
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base) {
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block,
+ int16_t *base) {
const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
+ return base + av1_raster_block_offset(plane_bsize, raster_block, stride);
}
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
- int ref_frame) {
- const VP10_COMMON *const cm = &cpi->common;
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const AV1_COMP *cpi,
+ int ref_frame) {
+ const AV1_COMMON *const cm = &cpi->common;
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
@@ -741,8 +736,7 @@
}
#if CONFIG_DUAL_FILTER
-int vp10_get_switchable_rate(const VP10_COMP *cpi,
- const MACROBLOCKD *const xd) {
+int av1_get_switchable_rate(const AV1_COMP *cpi, const MACROBLOCKD *const xd) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int inter_filter_cost = 0;
int dir;
@@ -751,7 +745,7 @@
if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
(mbmi->ref_frame[1] > INTRA_FRAME &&
has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
inter_filter_cost +=
cpi->switchable_interp_costs[ctx][mbmi->interp_filter[dir]];
}
@@ -759,19 +753,18 @@
return SWITCHABLE_INTERP_RATE_FACTOR * inter_filter_cost;
}
#else
-int vp10_get_switchable_rate(const VP10_COMP *cpi,
- const MACROBLOCKD *const xd) {
+int av1_get_switchable_rate(const AV1_COMP *cpi, const MACROBLOCKD *const xd) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
#if CONFIG_EXT_INTERP
- if (!vp10_is_interp_needed(xd)) return 0;
+ if (!av1_is_interp_needed(xd)) return 0;
#endif // CONFIG_EXT_INTERP
return SWITCHABLE_INTERP_RATE_FACTOR *
cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
}
#endif
-void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds(AV1_COMP *cpi) {
int i;
RD_OPT *const rd = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
@@ -1046,7 +1039,7 @@
#endif // CONFIG_EXT_INTER
}
-void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds_sub8x8(AV1_COMP *cpi) {
static const int thresh_mult[2][MAX_REFS] = {
#if CONFIG_EXT_REFS
{ 2500, 2500, 2500, 2500, 2500, 2500, 4500, 4500, 4500, 4500, 4500, 4500,
@@ -1063,41 +1056,41 @@
memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
}
-void vp10_update_rd_thresh_fact(const VP10_COMMON *const cm,
- int (*factor_buf)[MAX_MODES], int rd_thresh,
- int bsize, int best_mode_index) {
+void av1_update_rd_thresh_fact(const AV1_COMMON *const cm,
+ int (*factor_buf)[MAX_MODES], int rd_thresh,
+ int bsize, int best_mode_index) {
if (rd_thresh > 0) {
const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
int mode;
for (mode = 0; mode < top_mode; ++mode) {
- const BLOCK_SIZE min_size = VPXMAX(bsize - 1, BLOCK_4X4);
- const BLOCK_SIZE max_size = VPXMIN(bsize + 2, cm->sb_size);
+ const BLOCK_SIZE min_size = AOMMAX(bsize - 1, BLOCK_4X4);
+ const BLOCK_SIZE max_size = AOMMIN(bsize + 2, cm->sb_size);
BLOCK_SIZE bs;
for (bs = min_size; bs <= max_size; ++bs) {
int *const fact = &factor_buf[bs][mode];
if (mode == best_mode_index) {
*fact -= (*fact >> 4);
} else {
- *fact = VPXMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT);
+ *fact = AOMMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT);
}
}
}
}
}
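The update above runs a simple competition among modes: the winner's factor decays by 1/16 each time while every other candidate's factor creeps up by RD_THRESH_INC until capped. A sketch of one step (RD_THRESH_INC = 1 per rd.h; RD_THRESH_MAX_FACT's value is assumed here for illustration):

// Sketch of the per-mode factor update in av1_update_rd_thresh_fact().
#define RD_THRESH_INC 1        // from rd.h
#define RD_THRESH_MAX_FACT 64  // assumed value; defined alongside it in rd.h

static void update_factor_sketch(int *fact, int is_best_mode, int rd_thresh) {
  if (is_best_mode) {
    *fact -= (*fact >> 4);  // reward the winner: decay toward 0
  } else {
    const int cap = rd_thresh * RD_THRESH_MAX_FACT;
    const int bumped = *fact + RD_THRESH_INC;
    *fact = bumped < cap ? bumped : cap;  // AOMMIN(bumped, cap)
  }
}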
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
- vpx_bit_depth_t bit_depth) {
- const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
-#if CONFIG_VP9_HIGHBITDEPTH
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
+ aom_bit_depth_t bit_depth) {
+ const int q = av1_dc_quant(qindex, qdelta, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: return 20 * q;
- case VPX_BITS_10: return 5 * q;
- case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
+ case AOM_BITS_8: return 20 * q;
+ case AOM_BITS_10: return 5 * q;
+ case AOM_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
return -1;
}
#else
return 20 * q;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
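Note that the three branches above keep the intra penalty roughly constant in 8-bit quantizer units: 10-bit quantizer steps are about 4x the 8-bit ones, so 5 * q matches 20 * q at 8 bits, and 12-bit steps are about 16x, so ROUND_POWER_OF_TWO(5 * q, 2) does likewise.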
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 9680215..c902429 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_RD_H_
-#define VP10_ENCODER_RD_H_
+#ifndef AV1_ENCODER_RD_H_
+#define AV1_ENCODER_RD_H_
#include <limits.h>
@@ -30,10 +30,10 @@
#define RD_EPB_SHIFT 6
#define RDCOST(RM, DM, R, D) \
- (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP10_PROB_COST_SHIFT) + (D << DM))
+ (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), AV1_PROB_COST_SHIFT) + (D << DM))
-#define RDCOST_DBL(RM, DM, R, D) \
- (((((double)(R)) * (RM)) / (double)(1 << VP10_PROB_COST_SHIFT)) + \
+#define RDCOST_DBL(RM, DM, R, D) \
+ (((((double)(R)) * (RM)) / (double)(1 << AV1_PROB_COST_SHIFT)) + \
((double)(D) * (1 << (DM))))
#define QIDX_SKIP_THRESH 115
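The RDCOST macro folds a rate in probability-cost units and a distortion scaled by 2^DM onto one comparable scale. A minimal sketch of the arithmetic, assuming AV1_PROB_COST_SHIFT is 9 (so one raw bit costs av1_cost_bit(128, 0) = 512 in this scale) and using the RDDIV_BITS = 7 set up in rd.c:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define AV1_PROB_COST_SHIFT 9  // assumed value
#define RDDIV_BITS 7           // "to multiply D by 128", per rd.c
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
#define RDCOST(RM, DM, R, D) \
  (ROUND_POWER_OF_TWO(((int64_t)(R)) * (RM), AV1_PROB_COST_SHIFT) + ((D) << (DM)))

int main(void) {
  const int rdmult = 2112;   // e.g. av1_compute_rd_mult() at q = 24
  const int64_t rate = 512;  // one raw bit in this scale
  const int64_t dist = 100;  // sum of squared error
  // (512 * 2112 + 256) >> 9 = 2112, plus 100 << 7 = 12800 -> 14912.
  printf("rd cost = %" PRId64 "\n", RDCOST(rdmult, RDDIV_BITS, rate, dist));
  return 0;
}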
@@ -71,7 +71,7 @@
#define RD_THRESH_INC 1
// This enumerator type needs to be kept aligned with the mode order in
-// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
+// const MODE_DEFINITION av1_mode_order[MAX_MODES] used in the rd code.
typedef enum {
THR_NEARESTMV,
#if CONFIG_EXT_REFS
@@ -378,87 +378,86 @@
} RD_COST;
// Reset the rate distortion cost values to maximum (invalid) value.
-void vp10_rd_cost_reset(RD_COST *rd_cost);
+void av1_rd_cost_reset(RD_COST *rd_cost);
// Initialize the rate distortion cost values to zero.
-void vp10_rd_cost_init(RD_COST *rd_cost);
+void av1_rd_cost_init(RD_COST *rd_cost);
struct TileInfo;
struct TileDataEnc;
-struct VP10_COMP;
+struct AV1_COMP;
struct macroblock;
-int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex);
+int av1_compute_rd_mult(const struct AV1_COMP *cpi, int qindex);
-void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
+void av1_initialize_rd_consts(struct AV1_COMP *cpi);
-void vp10_initialize_me_consts(const struct VP10_COMP *cpi, MACROBLOCK *x,
- int qindex);
+void av1_initialize_me_consts(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ int qindex);
-void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n,
- unsigned int qstep, int *rate,
- int64_t *dist);
+void av1_model_rd_from_var_lapndz(int64_t var, unsigned int n,
+ unsigned int qstep, int *rate, int64_t *dist);
-int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
- const MACROBLOCKD *const xd);
+int av1_get_switchable_rate(const struct AV1_COMP *cpi,
+ const MACROBLOCKD *const xd);
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
- int stride);
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+ int stride);
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base);
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block,
+ int16_t *base);
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
- int ref_frame);
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const struct AV1_COMP *cpi,
+ int ref_frame);
-void vp10_init_me_luts(void);
+void av1_init_me_luts(void);
#if CONFIG_REF_MV
-void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame);
+void av1_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame);
#endif
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
- const struct macroblockd_plane *pd,
- ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
- ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]);
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+ const struct macroblockd_plane *pd,
+ ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+ ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]);
-void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds(struct AV1_COMP *cpi);
-void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds_sub8x8(struct AV1_COMP *cpi);
-void vp10_update_rd_thresh_fact(const VP10_COMMON *const cm,
- int (*fact)[MAX_MODES], int rd_thresh,
- int bsize, int best_mode_index);
+void av1_update_rd_thresh_fact(const AV1_COMMON *const cm,
+ int (*fact)[MAX_MODES], int rd_thresh, int bsize,
+ int best_mode_index);
-void vp10_fill_token_costs(vp10_coeff_cost *c,
+void av1_fill_token_costs(av1_coeff_cost *c,
#if CONFIG_ANS
- coeff_cdf_model (*cdf)[PLANE_TYPES],
+ coeff_cdf_model (*cdf)[PLANE_TYPES],
#endif // CONFIG_ANS
- vp10_coeff_probs_model (*p)[PLANE_TYPES]);
+ av1_coeff_probs_model (*p)[PLANE_TYPES]);
static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
int thresh_fact) {
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
-void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
- int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
+void av1_mv_pred(struct AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+ int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
x->errorperbit = rdmult >> RD_EPB_SHIFT;
x->errorperbit += (x->errorperbit == 0);
}
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
- struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src, int mi_row,
- int mi_col, const struct scale_factors *scale,
- const struct scale_factors *scale_uv);
+void av1_setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv);
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
- vpx_bit_depth_t bit_depth);
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
+ aom_bit_depth_t bit_depth);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RD_H_
+#endif // AV1_ENCODER_RD_H_
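set_error_per_bit() above closes the loop with motion search: errorperbit is simply rdmult >> RD_EPB_SHIFT (with RD_EPB_SHIFT = 6 as defined earlier in this header), so the rdmult of 2112 from the earlier sketch yields errorperbit = 33.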
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 62334a3..bd93746 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -11,12 +11,12 @@
#include <assert.h>
#include <math.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
@@ -120,7 +120,7 @@
typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
struct rdcost_block_args {
- const VP10_COMP *cpi;
+ const AV1_COMP *cpi;
MACROBLOCK *x;
ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE];
ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE];
@@ -136,7 +136,7 @@
};
#define LAST_NEW_MV_INDEX 6
-static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
+static const MODE_DEFINITION av1_mode_order[MAX_MODES] = {
{ NEARESTMV, { LAST_FRAME, NONE } },
#if CONFIG_EXT_REFS
{ NEARESTMV, { LAST2_FRAME, NONE } },
@@ -391,7 +391,7 @@
#endif // CONFIG_EXT_INTER
};
-static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
+static const REF_DEFINITION av1_ref_order[MAX_REFS] = {
{ { LAST_FRAME, NONE } },
#if CONFIG_EXT_REFS
{ { LAST2_FRAME, NONE } }, { { LAST3_FRAME, NONE } },
@@ -417,9 +417,9 @@
int l = get_unsigned_bits(n), m = (1 << l) - n;
if (l == 0) return 0;
if (v < m)
- return (l - 1) * vp10_cost_bit(128, 0);
+ return (l - 1) * av1_cost_bit(128, 0);
else
- return l * vp10_cost_bit(128, 0);
+ return l * av1_cost_bit(128, 0);
}
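write_uniform_cost() above prices a truncated binary code: with l = get_unsigned_bits(n) and m = 2^l - n, the first m symbols take l - 1 bits and the rest take l, each bit priced at av1_cost_bit(128, 0). For n = 5: l = 3, m = 3, so v in {0, 1, 2} costs two bits' worth and v in {3, 4} costs three. A sketch of the code length alone, assuming get_unsigned_bits(n) is floor(log2(n)) + 1 for n > 0:

// Sketch: bit-length of the truncated binary code that
// write_uniform_cost() prices, for v in [0, n).
static int uniform_code_length(unsigned n, unsigned v) {
  unsigned x = n, m;
  int l = 0;
  while (x) {  // l = floor(log2(n)) + 1 for n > 0
    ++l;
    x >>= 1;
  }
  if (l == 0) return 0;  // n == 0, mirroring the l == 0 guard above
  m = (1u << l) - n;
  return (v < m) ? l - 1 : l;
}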
// constants for prune 1 and prune 2 decision boundaries
@@ -444,7 +444,7 @@
#endif // CONFIG_EXT_TX
};
-static void get_energy_distribution_fine(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+static void get_energy_distribution_fine(const AV1_COMP *cpi, BLOCK_SIZE bsize,
uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
double *hordist, double *verdist) {
@@ -459,7 +459,7 @@
int i, j, index;
int w_shift = bw == 8 ? 1 : 2;
int h_shift = bh == 8 ? 1 : 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth) {
uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
@@ -471,7 +471,7 @@
(src16[j + i * src_stride] - dst16[j + i * dst_stride]);
}
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (i = 0; i < bh; ++i)
for (j = 0; j < bw; ++j) {
@@ -479,9 +479,9 @@
esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
(src[j + i * src_stride] - dst[j + i * dst_stride]);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} else {
var[0] = cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[0]);
var[1] = cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4,
@@ -569,9 +569,9 @@
(void)var[15];
}
-static int adst_vs_flipadst(const VP10_COMP *cpi, BLOCK_SIZE bsize,
- uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, double *hdist, double *vdist) {
+static int adst_vs_flipadst(const AV1_COMP *cpi, BLOCK_SIZE bsize, uint8_t *src,
+ int src_stride, uint8_t *dst, int dst_stride,
+ double *hdist, double *vdist) {
int prune_bitmask = 0;
double svm_proj_h = 0, svm_proj_v = 0;
get_energy_distribution_fine(cpi, bsize, src, src_stride, dst, dst_stride,
@@ -657,7 +657,7 @@
}
// Performance drop: 0.5%, Speed improvement: 24%
-static int prune_two_for_sby(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+static int prune_two_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
MACROBLOCK *x, MACROBLOCKD *xd, int adst_flipadst,
int dct_idtx) {
struct macroblock_plane *const p = &x->plane[0];
@@ -668,7 +668,7 @@
double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
double hcorr, vcorr;
int prune = 0;
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
if (adst_flipadst)
prune |= adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride,
@@ -680,17 +680,17 @@
#endif // CONFIG_EXT_TX
// Performance drop: 0.3%, Speed improvement: 5%
-static int prune_one_for_sby(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+static int prune_one_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
MACROBLOCK *x, MACROBLOCKD *xd) {
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &xd->plane[0];
double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
return adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride, pd->dst.buf,
pd->dst.stride, hdist, vdist);
}
-static int prune_tx_types(const VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+static int prune_tx_types(const AV1_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
MACROBLOCKD *xd, int tx_set) {
#if CONFIG_EXT_TX
const int *tx_set_1D = ext_tx_used_inter_1D[tx_set];
@@ -736,15 +736,15 @@
#endif
}
-static void model_rd_from_sse(const VP10_COMP *const cpi,
+static void model_rd_from_sse(const AV1_COMP *const cpi,
const MACROBLOCKD *const xd, BLOCK_SIZE bsize,
int plane, int64_t sse, int *rate,
int64_t *dist) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const int dequant_shift =
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
(xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
3;
// Fast approximate the modelling function.
@@ -754,19 +754,19 @@
if (quantizer < 120)
*rate = (int)((square_error * (280 - quantizer)) >>
- (16 - VP10_PROB_COST_SHIFT));
+ (16 - AV1_PROB_COST_SHIFT));
else
*rate = 0;
*dist = (square_error * quantizer) >> 8;
} else {
- vp10_model_rd_from_var_lapndz(sse, num_pels_log2_lookup[bsize],
- pd->dequant[1] >> dequant_shift, rate, dist);
+ av1_model_rd_from_var_lapndz(sse, num_pels_log2_lookup[bsize],
+ pd->dequant[1] >> dequant_shift, rate, dist);
}
*dist <<= 4;
}
-static void model_rd_for_sb(const VP10_COMP *const cpi, BLOCK_SIZE bsize,
+static void model_rd_for_sb(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
MACROBLOCK *x, MACROBLOCKD *xd, int plane_from,
int plane_to, int *out_rate_sum,
int64_t *out_dist_sum, int *skip_txfm_sb,
@@ -813,8 +813,8 @@
*out_dist_sum = dist_sum;
}
-int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
- intptr_t block_size, int64_t *ssz) {
+int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz) {
int i;
int64_t error = 0, sqcoeff = 0;
@@ -828,8 +828,8 @@
return error;
}
-int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
- int block_size) {
+int64_t av1_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
+ int block_size) {
int i;
int64_t error = 0;
@@ -841,10 +841,10 @@
return error;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
- const tran_low_t *dqcoeff,
- intptr_t block_size, int64_t *ssz, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
+ const tran_low_t *dqcoeff, intptr_t block_size,
+ int64_t *ssz, int bd) {
int i;
int64_t error = 0, sqcoeff = 0;
int shift = 2 * (bd - 8);
@@ -862,7 +862,7 @@
*ssz = sqcoeff;
return error;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
* decide whether to include cost of a trailing EOB node or not (i.e. we
@@ -895,10 +895,10 @@
int pt = combine_entropy_contexts(*A, *L);
#endif
int c, cost;
-#if CONFIG_VP9_HIGHBITDEPTH
- const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+ const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
#else
- const int *cat6_high_cost = vp10_get_high_cost_table(8);
+ const int *cat6_high_cost = av1_get_high_cost_table(8);
#endif
#if !CONFIG_VAR_TX && !CONFIG_SUPERTX
@@ -918,10 +918,10 @@
// dc token
int v = qcoeff[0];
int16_t prev_t;
- cost = vp10_get_token_cost(v, &prev_t, cat6_high_cost);
+ cost = av1_get_token_cost(v, &prev_t, cat6_high_cost);
cost += (*token_costs)[0][pt][prev_t];
- token_cache[0] = vp10_pt_energy_class[prev_t];
+ token_cache[0] = av1_pt_energy_class[prev_t];
++token_costs;
// ac tokens
@@ -930,7 +930,7 @@
int16_t t;
v = qcoeff[rc];
- cost += vp10_get_token_cost(v, &t, cat6_high_cost);
+ cost += av1_get_token_cost(v, &t, cat6_high_cost);
cost += (*token_costs)[!prev_t][!prev_t][t];
prev_t = t;
if (!--band_left) {
@@ -949,10 +949,10 @@
int v = qcoeff[0];
int16_t tok;
unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
- cost = vp10_get_token_cost(v, &tok, cat6_high_cost);
+ cost = av1_get_token_cost(v, &tok, cat6_high_cost);
cost += (*token_costs)[0][pt][tok];
- token_cache[0] = vp10_pt_energy_class[tok];
+ token_cache[0] = av1_pt_energy_class[tok];
++token_costs;
tok_cost_ptr = &((*token_costs)[!tok]);
@@ -962,10 +962,10 @@
const int rc = scan[c];
v = qcoeff[rc];
- cost += vp10_get_token_cost(v, &tok, cat6_high_cost);
+ cost += av1_get_token_cost(v, &tok, cat6_high_cost);
pt = get_coef_context(nb, token_cache, c);
cost += (*tok_cost_ptr)[pt][tok];
- token_cache[rc] = vp10_pt_energy_class[tok];
+ token_cache[rc] = av1_pt_energy_class[tok];
if (!--band_left) {
band_left = *band_count++;
++token_costs;
@@ -989,8 +989,8 @@
return cost;
}
-static void dist_block(const VP10_COMP *cpi, MACROBLOCK *x, int plane,
- int block, int blk_row, int blk_col, TX_SIZE tx_size,
+static void dist_block(const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block,
+ int blk_row, int blk_col, TX_SIZE tx_size,
int64_t *out_dist, int64_t *out_sse) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
@@ -1004,16 +1004,16 @@
int shift = (MAX_TX_SCALE - get_tx_scale(xd, tx_type, tx_size)) * 2;
tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
- *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
- &this_sse, bd) >>
+ *out_dist = av1_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
+ &this_sse, bd) >>
shift;
#else
*out_dist =
- vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
+ av1_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
shift;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
*out_sse = this_sse >> shift;
} else {
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
@@ -1037,12 +1037,12 @@
if (eob) {
const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, recon16[MAX_TX_SQUARE]);
uint8_t *recon = (uint8_t *)recon16;
#else
DECLARE_ALIGNED(16, uint8_t, recon[MAX_TX_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
const PLANE_TYPE plane_type = plane == 0 ? PLANE_TYPE_Y : PLANE_TYPE_UV;
@@ -1053,17 +1053,17 @@
inv_txfm_param.eob = eob;
inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
recon = CONVERT_TO_BYTEPTR(recon);
inv_txfm_param.bd = xd->bd;
- vpx_highbd_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0,
+ aom_highbd_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0,
NULL, 0, bsw, bsh, xd->bd);
highbd_inv_txfm_add(dqcoeff, recon, MAX_TX_SIZE, &inv_txfm_param);
} else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
- vpx_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0, NULL, 0,
+ aom_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0, NULL, 0,
bsw, bsh);
inv_txfm_add(dqcoeff, recon, MAX_TX_SIZE, &inv_txfm_param);
}
@@ -1100,33 +1100,33 @@
switch (tx_size) {
#if CONFIG_EXT_TX
case TX_4X8:
- sse = vpx_sum_squares_2d_i16(diff, diff_stride, 4) +
- vpx_sum_squares_2d_i16(diff + 4 * diff_stride, diff_stride, 4);
+ sse = aom_sum_squares_2d_i16(diff, diff_stride, 4) +
+ aom_sum_squares_2d_i16(diff + 4 * diff_stride, diff_stride, 4);
break;
case TX_8X4:
- sse = vpx_sum_squares_2d_i16(diff, diff_stride, 4) +
- vpx_sum_squares_2d_i16(diff + 4, diff_stride, 4);
+ sse = aom_sum_squares_2d_i16(diff, diff_stride, 4) +
+ aom_sum_squares_2d_i16(diff + 4, diff_stride, 4);
break;
case TX_8X16:
- sse = vpx_sum_squares_2d_i16(diff, diff_stride, 8) +
- vpx_sum_squares_2d_i16(diff + 8 * diff_stride, diff_stride, 8);
+ sse = aom_sum_squares_2d_i16(diff, diff_stride, 8) +
+ aom_sum_squares_2d_i16(diff + 8 * diff_stride, diff_stride, 8);
break;
case TX_16X8:
- sse = vpx_sum_squares_2d_i16(diff, diff_stride, 8) +
- vpx_sum_squares_2d_i16(diff + 8, diff_stride, 8);
+ sse = aom_sum_squares_2d_i16(diff, diff_stride, 8) +
+ aom_sum_squares_2d_i16(diff + 8, diff_stride, 8);
break;
case TX_16X32:
- sse = vpx_sum_squares_2d_i16(diff, diff_stride, 16) +
- vpx_sum_squares_2d_i16(diff + 16 * diff_stride, diff_stride, 16);
+ sse = aom_sum_squares_2d_i16(diff, diff_stride, 16) +
+ aom_sum_squares_2d_i16(diff + 16 * diff_stride, diff_stride, 16);
break;
case TX_32X16:
- sse = vpx_sum_squares_2d_i16(diff, diff_stride, 16) +
- vpx_sum_squares_2d_i16(diff + 16, diff_stride, 16);
+ sse = aom_sum_squares_2d_i16(diff, diff_stride, 16) +
+ aom_sum_squares_2d_i16(diff + 16, diff_stride, 16);
break;
#endif // CONFIG_EXT_TX
default:
assert(tx_size < TX_SIZES);
- sse = vpx_sum_squares_2d_i16(
+ sse = aom_sum_squares_2d_i16(
diff, diff_stride, num_4x4_blocks_wide_txsize_lookup[tx_size] << 2);
break;
}
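The rectangular cases above exist because aom_sum_squares_2d_i16() takes a single (square) size, so a 4x8, 8x16 or 16x32 residual is accumulated as two squares split along the long dimension. A plain-C sketch of the square primitive being called (the library ships optimized versions):

#include <stdint.h>

// Sketch of a square sum of squares over an int16 residual block.
static uint64_t sum_squares_2d_i16_sketch(const int16_t *src, int stride,
                                          int size) {
  uint64_t ss = 0;
  int r, c;
  for (r = 0; r < size; ++r)
    for (c = 0; c < size; ++c) {
      const int v = src[r * stride + c];
      ss += (uint64_t)(v * v);
    }
  return ss;
}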
@@ -1152,8 +1152,8 @@
struct encode_b_args intra_arg = {
x, NULL, &mbmi->skip, args->t_above, args->t_left, 1
};
- vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
- tx_size, &intra_arg);
+ av1_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ &intra_arg);
if (args->cpi->sf.use_transform_domain_distortion) {
dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
@@ -1162,7 +1162,7 @@
// Note that the encode block_intra call above already calls
// inv_txfm_add, so we can't just call dist_block here.
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
- const vpx_variance_fn_t variance = args->cpi->fn_ptr[tx_bsize].vf;
+ const aom_variance_fn_t variance = args->cpi->fn_ptr[tx_bsize].vf;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -1178,10 +1178,10 @@
unsigned int tmp;
sse = sum_squares_2d(diff, diff_stride, tx_size);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
sse = ROUND_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
sse = (int64_t)sse * 16;
variance(src, src_stride, dst, dst_stride, &tmp);
@@ -1190,14 +1190,14 @@
} else {
// full forward transform and quantization
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size, coeff_ctx);
#else
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
if (x->plane[plane].eobs[block])
- vp10_optimize_b(x, plane, block, tx_size, coeff_ctx);
+ av1_optimize_b(x, plane, block, tx_size, coeff_ctx);
dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
&sse);
}
@@ -1213,7 +1213,7 @@
rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
// TODO(jingning): temporarily enabled only for luma component
- rd = VPXMIN(rd1, rd2);
+ rd = AOMMIN(rd1, rd2);
args->this_rate += rate;
args->this_dist += dist;
@@ -1228,7 +1228,7 @@
args->skippable &= !x->plane[plane].eobs[block];
}
-static void txfm_rd_in_plane(MACROBLOCK *x, const VP10_COMP *cpi, int *rate,
+static void txfm_rd_in_plane(MACROBLOCK *x, const AV1_COMP *cpi, int *rate,
int64_t *distortion, int *skippable, int64_t *sse,
int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
TX_SIZE tx_size, int use_fast_coef_casting) {
@@ -1236,7 +1236,7 @@
const struct macroblockd_plane *const pd = &xd->plane[plane];
TX_TYPE tx_type;
struct rdcost_block_args args;
- vp10_zero(args);
+ av1_zero(args);
args.x = x;
args.cpi = cpi;
args.best_rd = ref_best_rd;
@@ -1245,13 +1245,13 @@
if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+ av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
args.so = get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
- &args);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+ &args);
if (args.exit_early) {
*rate = INT_MAX;
*distortion = INT64_MAX;
@@ -1266,18 +1266,17 @@
}
#if CONFIG_SUPERTX
-void vp10_txfm_rd_in_plane_supertx(MACROBLOCK *x, const VP10_COMP *cpi,
- int *rate, int64_t *distortion,
- int *skippable, int64_t *sse,
- int64_t ref_best_rd, int plane,
- BLOCK_SIZE bsize, TX_SIZE tx_size,
- int use_fast_coef_casting) {
+void av1_txfm_rd_in_plane_supertx(MACROBLOCK *x, const AV1_COMP *cpi, int *rate,
+ int64_t *distortion, int *skippable,
+ int64_t *sse, int64_t ref_best_rd, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size,
+ int use_fast_coef_casting) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblockd_plane *const pd = &xd->plane[plane];
struct rdcost_block_args args;
TX_TYPE tx_type;
- vp10_zero(args);
+ av1_zero(args);
args.cpi = cpi;
args.x = x;
args.best_rd = ref_best_rd;
@@ -1289,7 +1288,7 @@
if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+ av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
args.so = get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
@@ -1311,14 +1310,14 @@
}
#endif // CONFIG_SUPERTX
-static int64_t txfm_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d,
+static int64_t txfm_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d,
int *s, int64_t *sse, int64_t ref_best_rd,
BLOCK_SIZE bs, TX_TYPE tx_type, int tx_size) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int64_t rd = INT64_MAX;
- vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
+ aom_prob skip_prob = av1_get_skip_prob(cm, xd);
int s0, s1;
const int is_inter = is_inter_block(mbmi);
const int tx_size_ctx = get_tx_size_context(xd);
@@ -1334,8 +1333,8 @@
assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed_bsize(bs)));
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
- s0 = vp10_cost_bit(skip_prob, 0);
- s1 = vp10_cost_bit(skip_prob, 1);
+ s0 = av1_cost_bit(skip_prob, 0);
+ s1 = av1_cost_bit(skip_prob, 1);
mbmi->tx_type = tx_type;
mbmi->tx_size = tx_size;
@@ -1382,17 +1381,17 @@
if (tx_select) *r += r_tx_size;
if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !(*s))
- rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
+ rd = AOMMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
return rd;
}
-static int64_t choose_tx_size_fix_type(VP10_COMP *cpi, BLOCK_SIZE bs,
+static int64_t choose_tx_size_fix_type(AV1_COMP *cpi, BLOCK_SIZE bs,
MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip,
int64_t *psse, int64_t ref_best_rd,
TX_TYPE tx_type, int prune) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int r, s;
@@ -1510,7 +1509,7 @@
}
#if CONFIG_EXT_INTER
-static int64_t estimate_yrd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bs, MACROBLOCK *x,
+static int64_t estimate_yrd_for_sb(AV1_COMP *cpi, BLOCK_SIZE bs, MACROBLOCK *x,
int *r, int64_t *d, int *s, int64_t *sse,
int64_t ref_best_rd) {
return txfm_yrd(cpi, x, r, d, s, sse, ref_best_rd, bs, DCT_DCT,
@@ -1518,18 +1517,18 @@
}
#endif // CONFIG_EXT_INTER
-static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_largest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip, int64_t *sse,
int64_t ref_best_rd, BLOCK_SIZE bs) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TX_TYPE tx_type, best_tx_type = DCT_DCT;
int r, s;
int64_t d, psse, this_rd, best_rd = INT64_MAX;
- vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
- int s0 = vp10_cost_bit(skip_prob, 0);
- int s1 = vp10_cost_bit(skip_prob, 1);
+ aom_prob skip_prob = av1_get_skip_prob(cm, xd);
+ int s0 = av1_cost_bit(skip_prob, 0);
+ int s1 = av1_cost_bit(skip_prob, 1);
const int is_inter = is_inter_block(mbmi);
int prune = 0;
#if CONFIG_EXT_TX
@@ -1593,7 +1592,7 @@
else
this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d);
if (is_inter_block(mbmi) && !xd->lossless[mbmi->segment_id] && !s)
- this_rd = VPXMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
+ this_rd = AOMMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
if (this_rd < best_rd) {
best_rd = this_rd;
@@ -1630,7 +1629,7 @@
else
this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d);
if (is_inter && !xd->lossless[mbmi->segment_id] && !s)
- this_rd = VPXMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
+ this_rd = AOMMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
if (this_rd < best_rd) {
best_rd = this_rd;
@@ -1645,7 +1644,7 @@
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_smallest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip,
int64_t *sse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
@@ -1659,10 +1658,10 @@
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_tx_size_type_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
- int *rate, int64_t *distortion,
- int *skip, int64_t *psse,
- int64_t ref_best_rd, BLOCK_SIZE bs) {
+static void choose_tx_size_type_from_rd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skip,
+ int64_t *psse, int64_t ref_best_rd,
+ BLOCK_SIZE bs) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int r, s;
@@ -1709,7 +1708,7 @@
#endif
}
-static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void super_block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip, int64_t *psse,
BLOCK_SIZE bs, int64_t ref_best_rd) {
MACROBLOCKD *xd = &x->e_mbd;
@@ -1748,7 +1747,7 @@
}
static int rd_pick_palette_intra_sby(
- VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
+ AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
int dc_mode_cost, PALETTE_MODE_INFO *palette_mode_info,
uint8_t *best_palette_color_map, TX_SIZE *best_tx, TX_TYPE *best_tx_type,
PREDICTION_MODE *mode_selected, int64_t *best_rd) {
@@ -1764,13 +1763,13 @@
assert(cpi->common.allow_screen_content_tools);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth)
- colors = vp10_count_colors_highbd(src, src_stride, rows, cols,
- cpi->common.bit_depth);
+ colors = av1_count_colors_highbd(src, src_stride, rows, cols,
+ cpi->common.bit_depth);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
- colors = vp10_count_colors(src, src_stride, rows, cols);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ colors = av1_count_colors(src, src_stride, rows, cols);
palette_mode_info->palette_size[0] = 0;
#if CONFIG_EXT_INTRA
mic->mbmi.ext_intra_mode_info.use_ext_intra_mode[0] = 0;
@@ -1787,15 +1786,15 @@
float lb, ub, val;
MB_MODE_INFO *const mbmi = &mic->mbmi;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
if (cpi->common.use_highbitdepth)
lb = ub = src16[0];
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
lb = ub = src[0];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth) {
for (r = 0; r < rows; ++r) {
for (c = 0; c < cols; ++c) {
@@ -1808,7 +1807,7 @@
}
}
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (r = 0; r < rows; ++r) {
for (c = 0; c < cols; ++c) {
val = src[r * src_stride + c];
@@ -1819,9 +1818,9 @@
ub = val;
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
mbmi->mode = DC_PRED;
#if CONFIG_EXT_INTRA
@@ -1834,21 +1833,21 @@
--n) {
for (i = 0; i < n; ++i)
centroids[i] = lb + (2 * i + 1) * (ub - lb) / n / 2;
- vp10_k_means(data, centroids, color_map, rows * cols, n, 1, max_itr);
- k = vp10_remove_duplicates(centroids, n);
+ av1_k_means(data, centroids, color_map, rows * cols, n, 1, max_itr);
+ k = av1_remove_duplicates(centroids, n);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth)
for (i = 0; i < k; ++i)
pmi->palette_colors[i] =
clip_pixel_highbd((int)centroids[i], cpi->common.bit_depth);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (i = 0; i < k; ++i)
pmi->palette_colors[i] = clip_pixel((int)centroids[i]);
pmi->palette_size[0] = k;
- vp10_calc_indices(data, centroids, color_map, rows * cols, k, 1);
+ av1_calc_indices(data, centroids, color_map, rows * cols, k, 1);
super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
bsize, *best_rd);
@@ -1856,16 +1855,16 @@
this_rate =
this_rate_tokenonly + dc_mode_cost +
- cpi->common.bit_depth * k * vp10_cost_bit(128, 0) +
+ cpi->common.bit_depth * k * av1_cost_bit(128, 0) +
cpi->palette_y_size_cost[bsize - BLOCK_8X8][k - 2] +
write_uniform_cost(k, color_map[0]) +
- vp10_cost_bit(
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
+ av1_cost_bit(
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
1);
for (i = 0; i < rows; ++i) {
for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
- color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, k,
- color_order);
+ color_ctx = av1_get_palette_color_context(color_map, cols, i, j, k,
+ color_order);
for (r = 0; r < k; ++r)
if (color_map[i * cols + j] == color_order[r]) {
color_idx = r;
@@ -1892,7 +1891,7 @@
return rate_overhead;
}
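
For orientation amid the renames: the palette search above seeds n centroids evenly across the sample range [lb, ub], runs av1_k_means, and drops centroids that collapse to the same pixel value via av1_remove_duplicates. A self-contained 1-D sketch of that pipeline, assuming a palette cap of 64 colors (the bound checked earlier in this function); the helper below is illustrative, not the library's implementation:

#include <math.h>

/* 1-D k-means for palette colors, k <= 64 assumed (the codec's cap).
 * Illustrative sketch, not libaom API. */
static int palette_kmeans_1d(const float *data, int n_data, float *centroids,
                             int k, int max_itr) {
  float lb = data[0], ub = data[0];
  float sum[64];
  int cnt[64];
  int i, j, itr, out;
  for (i = 1; i < n_data; ++i) {
    if (data[i] < lb) lb = data[i];
    if (data[i] > ub) ub = data[i];
  }
  /* Even seeding across [lb, ub], as in the loop above:
   * centroids[i] = lb + (2*i + 1) * (ub - lb) / (2*k). */
  for (i = 0; i < k; ++i) centroids[i] = lb + (2 * i + 1) * (ub - lb) / k / 2;
  for (itr = 0; itr < max_itr; ++itr) {
    for (j = 0; j < k; ++j) {
      sum[j] = 0;
      cnt[j] = 0;
    }
    /* Assignment step: nearest centroid per sample. */
    for (i = 0; i < n_data; ++i) {
      int best = 0;
      float best_d = fabsf(data[i] - centroids[0]);
      for (j = 1; j < k; ++j) {
        const float d = fabsf(data[i] - centroids[j]);
        if (d < best_d) { best_d = d; best = j; }
      }
      sum[best] += data[i];
      ++cnt[best];
    }
    /* Update step: move each centroid to the mean of its members. */
    for (j = 0; j < k; ++j)
      if (cnt[j]) centroids[j] = sum[j] / cnt[j];
  }
  /* Drop centroids that round to the same pixel value, like
   * av1_remove_duplicates(); return the effective palette size. */
  out = 0;
  for (i = 0; i < k; ++i) {
    int dup = 0;
    for (j = 0; j < out; ++j)
      if ((int)(centroids[j] + 0.5f) == (int)(centroids[i] + 0.5f)) dup = 1;
    if (!dup) centroids[out++] = centroids[i];
  }
  return out;
}

The effective palette size k returned this way is what the rate term above prices at cpi->common.bit_depth * k * av1_cost_bit(128, 0), i.e. k raw colors at bit_depth bits each.
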
-static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+static int64_t rd_pick_intra4x4block(AV1_COMP *cpi, MACROBLOCK *x, int row,
int col, PREDICTION_MODE *best_mode,
const int *bmode_costs, ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l, int *bestrate,
@@ -1913,7 +1912,7 @@
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
uint8_t best_dst[8 * 8];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t best_dst16[8 * 8];
#endif
@@ -1922,7 +1921,7 @@
xd->mi[0]->mbmi.tx_size = TX_4X4;
xd->mi[0]->mbmi.palette_mode_info.palette_size[0] = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
@@ -1947,11 +1946,11 @@
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
int16_t *const src_diff =
- vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+ av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
xd->mi[0]->bmi[block].as_mode = mode;
- vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
- dst_stride, col + idx, row + idy, 0);
- vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
+ av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+ dst_stride, col + idx, row + idy, 0);
+ aom_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
dst_stride, xd->bd);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
@@ -1961,11 +1960,11 @@
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#endif // CONFIG_VAR_TX | CONFIG_NEW_QUANT
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx,
- BLOCK_8X8, TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, coeff_ctx);
#else
- vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
#if CONFIG_VAR_TX
ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
@@ -1979,9 +1978,9 @@
#endif // CONFIG_VAR_TX
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
- vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
- dst_stride, p->eobs[block], xd->bd,
- DCT_DCT, 1);
+ av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], xd->bd,
+ DCT_DCT, 1);
} else {
int64_t dist;
unsigned int tmp;
@@ -1990,13 +1989,13 @@
const int coeff_ctx =
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx,
- BLOCK_8X8, TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, coeff_ctx);
#else
- vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
- vp10_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+ av1_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
#if CONFIG_VAR_TX
ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
so->neighbors, cpi->sf.use_fast_coef_costing);
@@ -2007,9 +2006,9 @@
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
#endif // CONFIG_VAR_TX
- vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
- dst_stride, p->eobs[block], xd->bd,
- tx_type, 0);
+ av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], xd->bd,
+ tx_type, 0);
cpi->fn_ptr[BLOCK_4X4].vf(src, src_stride, dst, dst_stride, &tmp);
dist = (int64_t)tmp << 4;
distortion += dist;
@@ -2048,7 +2047,7 @@
return best_rd;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
@@ -2073,11 +2072,11 @@
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
int16_t *const src_diff =
- vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+ av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
xd->mi[0]->bmi[block].as_mode = mode;
- vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
- dst_stride, col + idx, row + idy, 0);
- vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
+ av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+ dst_stride, col + idx, row + idy, 0);
+ aom_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
@@ -2087,11 +2086,11 @@
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#endif // CONFIG_VAR_TX | CONFIG_NEW_QUANT
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, coeff_ctx);
#else
- vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
- VP10_XFORM_QUANT_B);
+ av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
+ AV1_XFORM_QUANT_B);
#endif // CONFIG_NEW_QUANT
#if CONFIG_VAR_TX
ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
@@ -2105,8 +2104,8 @@
#endif
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
- vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
- dst_stride, p->eobs[block], DCT_DCT, 1);
+ av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], DCT_DCT, 1);
} else {
int64_t dist;
unsigned int tmp;
@@ -2115,13 +2114,13 @@
const int coeff_ctx =
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, coeff_ctx);
#else
- vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
- VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
+ AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
- vp10_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+ av1_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
#if CONFIG_VAR_TX
ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
so->neighbors, cpi->sf.use_fast_coef_costing);
@@ -2132,8 +2131,8 @@
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
#endif
- vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
- dst_stride, p->eobs[block], tx_type, 0);
+ av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], tx_type, 0);
cpi->fn_ptr[BLOCK_4X4].vf(src, src_stride, dst, dst_stride, &tmp);
dist = (int64_t)tmp << 4;
distortion += dist;
@@ -2173,7 +2172,7 @@
return best_rd;
}
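
The RDCOST(x->rdmult, x->rddiv, rate, dist) comparisons used throughout these functions evaluate a Lagrangian cost lambda*R + D in shared fixed-point units. A sketch of the macro's usual shape in this codebase lineage; the exact rounding and shift constants here are an assumption, so treat the real encoder header as authoritative:

#include <stdint.h>

/* Lagrangian mode cost: rdmult scales the rate term (lambda), rddiv shifts
 * the distortion into the same units. Rounding constants are assumed. */
static int64_t rdcost_sketch(int rdmult, int rddiv, int rate, int64_t dist) {
  return (((int64_t)rate * rdmult + 128) >> 8) + (dist << rddiv);
}

The recurring pattern rd = AOMMIN(RDCOST(..., rate, distortion), RDCOST(..., 0, sse)) then asks whether coding the residual beats skipping it outright, with the skip path's distortion equal to the prediction SSE.
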
-static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
+static int64_t rd_pick_intra_sub_8x8_y_mode(AV1_COMP *cpi, MACROBLOCK *mb,
int *rate, int *rate_y,
int64_t *distortion,
int64_t best_rd) {
@@ -2210,8 +2209,8 @@
int64_t d = INT64_MAX, this_rd = INT64_MAX;
i = idy * 2 + idx;
if (cpi->common.frame_type == KEY_FRAME) {
- const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
- const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
+ const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, i);
+ const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, i);
bmode_costs = cpi->y_mode_costs[A][L];
}
@@ -2267,7 +2266,7 @@
#if CONFIG_EXT_INTRA
// Return 1 if an ext intra mode is selected; return 0 otherwise.
-static int rd_pick_ext_intra_sby(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int rd_pick_ext_intra_sby(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
int mode_cost, int64_t *best_rd,
@@ -2283,7 +2282,7 @@
EXT_INTRA_MODE_INFO ext_intra_mode_info;
TX_TYPE best_tx_type;
- vp10_zero(ext_intra_mode_info);
+ av1_zero(ext_intra_mode_info);
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 1;
mbmi->mode = DC_PRED;
mbmi->palette_mode_info.palette_size[0] = 0;
@@ -2296,7 +2295,7 @@
if (this_rate_tokenonly == INT_MAX) continue;
this_rate = this_rate_tokenonly +
- vp10_cost_bit(cpi->common.fc->ext_intra_probs[0], 1) +
+ av1_cost_bit(cpi->common.fc->ext_intra_probs[0], 1) +
write_uniform_cost(FILTER_INTRA_MODES, mode) + mode_cost;
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
@@ -2328,7 +2327,7 @@
}
static void pick_intra_angle_routine_sby(
- VP10_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
+ AV1_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
int64_t *distortion, int *skippable, int *best_angle_delta,
TX_SIZE *best_tx_size, TX_TYPE *best_tx_type, INTRA_FILTER *best_filter,
BLOCK_SIZE bsize, int rate_overhead, int64_t *best_rd) {
@@ -2355,7 +2354,7 @@
}
}
-static int64_t rd_pick_intra_angle_sby(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_angle_sby(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
int rate_overhead, int64_t best_rd) {
@@ -2364,7 +2363,7 @@
MB_MODE_INFO *mbmi = &mic->mbmi;
int this_rate, this_rate_tokenonly, s;
int angle_delta, best_angle_delta = 0, p_angle;
- const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+ const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
INTRA_FILTER filter, best_filter = INTRA_FILTER_LINEAR;
const double rd_adjust = 1.2;
int64_t this_distortion, this_rd;
@@ -2385,7 +2384,7 @@
mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
int64_t tmp_best_rd;
- if ((FILTER_FAST_SEARCH || !vp10_is_intra_filter_switchable(p_angle)) &&
+ if ((FILTER_FAST_SEARCH || !av1_is_intra_filter_switchable(p_angle)) &&
filter != INTRA_FILTER_LINEAR)
continue;
mic->mbmi.intra_filter = filter;
@@ -2430,7 +2429,7 @@
for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
mic->mbmi.intra_filter = filter;
if ((FILTER_FAST_SEARCH ||
- !vp10_is_intra_filter_switchable(p_angle)) &&
+ !av1_is_intra_filter_switchable(p_angle)) &&
filter != INTRA_FILTER_LINEAR)
continue;
pick_intra_angle_routine_sby(
@@ -2450,7 +2449,7 @@
mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
mic->mbmi.intra_filter = filter;
- if ((FILTER_FAST_SEARCH || !vp10_is_intra_filter_switchable(p_angle)) &&
+ if ((FILTER_FAST_SEARCH || !av1_is_intra_filter_switchable(p_angle)) &&
filter != INTRA_FILTER_LINEAR)
continue;
pick_intra_angle_routine_sby(
@@ -2466,7 +2465,7 @@
if (FILTER_FAST_SEARCH && *rate_tokenonly < INT_MAX) {
mbmi->angle_delta[0] = best_angle_delta;
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle)) {
+ if (av1_is_intra_filter_switchable(p_angle)) {
for (filter = INTRA_FILTER_LINEAR + 1; filter < INTRA_FILTERS; ++filter) {
mic->mbmi.intra_filter = filter;
pick_intra_angle_routine_sby(
@@ -2534,7 +2533,7 @@
remd = dx % dy;
quot = dx / dy;
remd = remd * 16 / dy;
- index = gradient_to_angle_bin[sn][VPXMIN(quot, 6)][VPXMIN(remd, 15)];
+ index = gradient_to_angle_bin[sn][AOMMIN(quot, 6)][AOMMIN(remd, 15)];
}
hist[index] += temp;
}
@@ -2561,7 +2560,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_angle_estimation(const uint8_t *src8, int src_stride,
int rows, int cols,
uint8_t *directional_mode_skip_mask) {
@@ -2586,7 +2585,7 @@
remd = dx % dy;
quot = dx / dy;
remd = remd * 16 / dy;
- index = gradient_to_angle_bin[sn][VPXMIN(quot, 6)][VPXMIN(remd, 15)];
+ index = gradient_to_angle_bin[sn][AOMMIN(quot, 6)][AOMMIN(remd, 15)];
}
hist[index] += temp;
}
@@ -2612,11 +2611,11 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EXT_INTRA
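
Both angle_estimation and highbd_angle_estimation above build a gradient-direction histogram so that directional intra modes pointing where the block has little gradient energy can be skipped. A compact sketch of the core accumulation, assuming 8-bit samples and using atan2 in place of the gradient_to_angle_bin quotient/remainder table from the real code:

#include <math.h>
#include <stdint.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define N_ANGLE_BINS 8 /* illustrative bin count; the codec uses its own */

/* Accumulate squared gradient magnitude into a direction histogram;
 * the caller zeroes hist first. Bins with tiny mass later mark their
 * directional modes as skippable. Sketch only, not libaom API. */
static void angle_histogram(const uint8_t *src, int stride, int rows, int cols,
                            uint64_t hist[N_ANGLE_BINS]) {
  int r, c;
  for (r = 1; r < rows; ++r) {
    for (c = 1; c < cols; ++c) {
      const int dx = src[r * stride + c] - src[r * stride + c - 1];
      const int dy = src[r * stride + c] - src[(r - 1) * stride + c];
      const double mag = (double)dx * dx + (double)dy * dy;
      const double angle = atan2((double)dy, (double)dx); /* [-pi, pi] */
      int bin = (int)((angle + M_PI) / (2 * M_PI) * N_ANGLE_BINS);
      if (bin == N_ANGLE_BINS) bin = N_ANGLE_BINS - 1;
      hist[bin] += (uint64_t)mag;
    }
  }
}
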
// This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_sby_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
int64_t best_rd) {
@@ -2628,7 +2627,7 @@
int64_t this_distortion, this_rd;
TX_SIZE best_tx = TX_4X4;
#if CONFIG_EXT_INTRA
- const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+ const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
EXT_INTRA_MODE_INFO ext_intra_mode_info;
int is_directional_mode, rate_overhead, best_angle_delta = 0;
INTRA_FILTER best_filter = INTRA_FILTER_LINEAR;
@@ -2651,8 +2650,8 @@
int palette_ctx = 0;
const MODE_INFO *above_mi = xd->above_mi;
const MODE_INFO *left_mi = xd->left_mi;
- const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
- const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
+ const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, 0);
+ const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, 0);
const PREDICTION_MODE FINAL_MODE_SEARCH = TM_PRED + 1;
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
bmode_costs = cpi->y_mode_costs[A][L];
@@ -2663,7 +2662,7 @@
mic->mbmi.angle_delta[0] = 0;
memset(directional_mode_skip_mask, 0,
sizeof(directional_mode_skip_mask[0]) * INTRA_MODES);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
highbd_angle_estimation(src, src_stride, rows, cols,
directional_mode_skip_mask);
@@ -2728,11 +2727,11 @@
TX_8X8][get_tx_size_context(xd)][mic->mbmi.tx_size];
}
if (cpi->common.allow_screen_content_tools && mic->mbmi.mode == DC_PRED)
- this_rate += vp10_cost_bit(
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
+ this_rate += av1_cost_bit(
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
#if CONFIG_EXT_INTRA
if (mic->mbmi.mode == DC_PRED && ALLOW_FILTER_INTRA_MODES)
- this_rate += vp10_cost_bit(cpi->common.fc->ext_intra_probs[0], 0);
+ this_rate += av1_cost_bit(cpi->common.fc->ext_intra_probs[0], 0);
if (is_directional_mode) {
int p_angle;
this_rate +=
@@ -2740,7 +2739,7 @@
MAX_ANGLE_DELTAS + mic->mbmi.angle_delta[0]);
p_angle = mode_to_angle_map[mic->mbmi.mode] +
mic->mbmi.angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle))
+ if (av1_is_intra_filter_switchable(p_angle))
this_rate +=
cpi->intra_filter_cost[intra_filter_ctx][mic->mbmi.intra_filter];
}
@@ -2815,10 +2814,10 @@
}
#if CONFIG_VAR_TX
-void vp10_tx_block_rd_b(const VP10_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
- int blk_row, int blk_col, int plane, int block,
- int plane_bsize, int coeff_ctx, int *rate,
- int64_t *dist, int64_t *bsse, int *skip) {
+void av1_tx_block_rd_b(const AV1_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
+ int blk_row, int blk_col, int plane, int block,
+ int plane_bsize, int coeff_ctx, int *rate, int64_t *dist,
+ int64_t *bsse, int *skip) {
MACROBLOCKD *xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -2834,12 +2833,12 @@
int src_stride = p->src.stride;
uint8_t *src = &p->src.buf[4 * blk_row * src_stride + 4 * blk_col];
uint8_t *dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, rec_buffer16[MAX_TX_SQUARE]);
uint8_t *rec_buffer;
#else
DECLARE_ALIGNED(16, uint8_t, rec_buffer[MAX_TX_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
@@ -2856,51 +2855,51 @@
max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
- tx_size, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size, coeff_ctx);
#else
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
- VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
- vp10_optimize_b(x, plane, block, tx_size, coeff_ctx);
+ av1_optimize_b(x, plane, block, tx_size, coeff_ctx);
// TODO(any): Use dist_block to compute distortion
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
rec_buffer = CONVERT_TO_BYTEPTR(rec_buffer16);
- vpx_highbd_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL,
+ aom_highbd_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL,
0, NULL, 0, bh, bh, xd->bd);
} else {
rec_buffer = (uint8_t *)rec_buffer16;
- vpx_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0,
+ aom_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0,
NULL, 0, bh, bh);
}
#else
- vpx_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0, NULL,
+ aom_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0, NULL,
0, bh, bh);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (blk_row + (bh >> 2) > max_blocks_high ||
blk_col + (bh >> 2) > max_blocks_wide) {
int idx, idy;
- int blocks_height = VPXMIN(bh >> 2, max_blocks_high - blk_row);
- int blocks_width = VPXMIN(bh >> 2, max_blocks_wide - blk_col);
+ int blocks_height = AOMMIN(bh >> 2, max_blocks_high - blk_row);
+ int blocks_width = AOMMIN(bh >> 2, max_blocks_wide - blk_col);
tmp = 0;
for (idy = 0; idy < blocks_height; idy += 2) {
for (idx = 0; idx < blocks_width; idx += 2) {
const int16_t *d = diff + 4 * idy * diff_stride + 4 * idx;
- tmp += vpx_sum_squares_2d_i16(d, diff_stride, 8);
+ tmp += aom_sum_squares_2d_i16(d, diff_stride, 8);
}
}
} else {
- tmp = vpx_sum_squares_2d_i16(diff, diff_stride, bh);
+ tmp = aom_sum_squares_2d_i16(diff, diff_stride, bh);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
tmp = ROUND_POWER_OF_TWO(tmp, (xd->bd - 8) * 2);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
*bsse += tmp * 16;
if (p->eobs[block] > 0) {
@@ -2909,23 +2908,23 @@
inv_txfm_param.tx_size = tx_size;
inv_txfm_param.eob = p->eobs[block];
inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
inv_txfm_param.bd = xd->bd;
highbd_inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
} else {
inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
}
-#else // CONFIG_VP9_HIGHBITDEPTH
+#else // CONFIG_AOM_HIGHBITDEPTH
inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if ((bh >> 2) + blk_col > max_blocks_wide ||
(bh >> 2) + blk_row > max_blocks_high) {
int idx, idy;
unsigned int this_dist;
- int blocks_height = VPXMIN(bh >> 2, max_blocks_high - blk_row);
- int blocks_width = VPXMIN(bh >> 2, max_blocks_wide - blk_col);
+ int blocks_height = AOMMIN(bh >> 2, max_blocks_high - blk_row);
+ int blocks_width = AOMMIN(bh >> 2, max_blocks_wide - blk_col);
tmp = 0;
for (idy = 0; idy < blocks_height; idy += 2) {
for (idx = 0; idx < blocks_width; idx += 2) {
@@ -2948,7 +2947,7 @@
*skip &= (p->eobs[block] == 0);
}
-static void select_tx_block(const VP10_COMP *cpi, MACROBLOCK *x, int blk_row,
+static void select_tx_block(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
int blk_col, int plane, int block, TX_SIZE tx_size,
BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *ta,
ENTROPY_CONTEXT *tl, TXFM_CONTEXT *tx_above,
@@ -2977,7 +2976,7 @@
int64_t sum_dist = 0, sum_bsse = 0;
int64_t sum_rd = INT64_MAX;
- int sum_rate = vp10_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 1);
+ int sum_rate = av1_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 1);
int all_skip = 1;
int tmp_eob = 0;
int zero_blk_rate;
@@ -3029,8 +3028,8 @@
if (cpi->common.tx_mode == TX_MODE_SELECT || tx_size == TX_4X4) {
inter_tx_size[0][0] = tx_size;
- vp10_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
- plane_bsize, coeff_ctx, rate, dist, bsse, skip);
+ av1_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
+ plane_bsize, coeff_ctx, rate, dist, bsse, skip);
if ((RDCOST(x->rdmult, x->rddiv, *rate, *dist) >=
RDCOST(x->rdmult, x->rddiv, zero_blk_rate, *bsse) ||
@@ -3047,7 +3046,7 @@
}
if (tx_size > TX_4X4)
- *rate += vp10_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 0);
+ *rate += av1_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 0);
this_rd = RDCOST(x->rdmult, x->rddiv, *rate, *dist);
tmp_eob = p->eobs[block];
}
@@ -3109,7 +3108,7 @@
}
}
-static void inter_block_yrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void inter_block_yrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skippable, int64_t *sse,
BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3141,7 +3140,7 @@
int pnrate = 0, pnskip = 1;
int64_t pndist = 0, pnsse = 0;
- vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctxa, ctxl);
+ av1_get_entropy_contexts(bsize, TX_4X4, pd, ctxa, ctxl);
memcpy(tx_above, xd->above_txfm_context,
sizeof(TXFM_CONTEXT) * (mi_width >> 1));
memcpy(tx_left, xd->left_txfm_context,
@@ -3157,14 +3156,14 @@
*distortion += pndist;
*sse += pnsse;
*skippable &= pnskip;
- this_rd += VPXMIN(RDCOST(x->rdmult, x->rddiv, pnrate, pndist),
+ this_rd += AOMMIN(RDCOST(x->rdmult, x->rddiv, pnrate, pndist),
RDCOST(x->rdmult, x->rddiv, 0, pnsse));
block += step;
}
}
}
- this_rd = VPXMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
+ this_rd = AOMMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
RDCOST(x->rdmult, x->rddiv, 0, *sse));
if (this_rd > ref_best_rd) is_cost_valid = 0;
@@ -3177,11 +3176,11 @@
}
}
-static int64_t select_tx_size_fix_type(const VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t select_tx_size_fix_type(const AV1_COMP *cpi, MACROBLOCK *x,
int *rate, int64_t *dist, int *skippable,
int64_t *sse, BLOCK_SIZE bsize,
int64_t ref_best_rd, TX_TYPE tx_type) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
@@ -3189,9 +3188,9 @@
#if CONFIG_EXT_TX
int ext_tx_set = get_ext_tx_set(max_tx_size, bsize, is_inter);
#endif // CONFIG_EXT_TX
- vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
- int s0 = vp10_cost_bit(skip_prob, 0);
- int s1 = vp10_cost_bit(skip_prob, 1);
+ aom_prob skip_prob = av1_get_skip_prob(cm, xd);
+ int s0 = av1_cost_bit(skip_prob, 0);
+ int s1 = av1_cost_bit(skip_prob, 1);
int64_t rd;
mbmi->tx_type = tx_type;
@@ -3229,12 +3228,12 @@
rd = RDCOST(x->rdmult, x->rddiv, *rate + s0, *dist);
if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !(*skippable))
- rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
+ rd = AOMMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
return rd;
}
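
select_tx_size_fix_type folds the skip flag's signalling cost into the final comparison: s0 prices skip=0 on top of the residual rate, while the skip branch pays s1 and takes the prediction SSE as its distortion; as the code above shows, the skip branch is only considered for non-lossless inter blocks that are not already all-skip. A sketch of that decision, with illustrative helpers and the same assumed rdcost form as the earlier sketch:

#include <stdint.h>

static int64_t rdcost_sk(int rdmult, int rddiv, int rate, int64_t dist) {
  return (((int64_t)rate * rdmult + 128) >> 8) + (dist << rddiv);
}

/* s0/s1 come from av1_cost_bit(skip_prob, 0/1) as above. Sketch only. */
static int64_t code_or_skip_rd(int rdmult, int rddiv, int rate, int s0, int s1,
                               int64_t dist, int64_t sse) {
  const int64_t rd_code = rdcost_sk(rdmult, rddiv, rate + s0, dist);
  const int64_t rd_skip = rdcost_sk(rdmult, rddiv, s1, sse);
  return rd_code < rd_skip ? rd_code : rd_skip;
}
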
-static void select_tx_type_yrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void select_tx_type_yrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skippable,
int64_t *sse, BLOCK_SIZE bsize,
int64_t ref_best_rd) {
@@ -3320,7 +3319,7 @@
memcpy(x->blk_skip[0], best_blk_skip, sizeof(best_blk_skip[0]) * n4);
}
-static void tx_block_rd(const VP10_COMP *cpi, MACROBLOCK *x, int blk_row,
+static void tx_block_rd(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
int blk_col, int plane, int block, TX_SIZE tx_size,
BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *above_ctx,
ENTROPY_CONTEXT *left_ctx, int *rate, int64_t *dist,
@@ -3372,8 +3371,8 @@
default: assert(0 && "Invalid transform size."); break;
}
coeff_ctx = combine_entropy_contexts(ta[0], tl[0]);
- vp10_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
- plane_bsize, coeff_ctx, rate, dist, bsse, skip);
+ av1_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
+ plane_bsize, coeff_ctx, rate, dist, bsse, skip);
for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i)
ta[i] = !(p->eobs[block] == 0);
for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i)
@@ -3398,7 +3397,7 @@
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
-static int inter_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int inter_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skippable, int64_t *sse,
BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3412,7 +3411,7 @@
if (is_inter_block(mbmi) && is_cost_valid) {
int plane;
for (plane = 1; plane < MAX_MB_PLANE; ++plane)
- vp10_subtract_plane(x, bsize, plane);
+ av1_subtract_plane(x, bsize, plane);
}
*rate = 0;
@@ -3435,7 +3434,7 @@
ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
- vp10_get_entropy_contexts(bsize, TX_4X4, pd, ta, tl);
+ av1_get_entropy_contexts(bsize, TX_4X4, pd, ta, tl);
for (idy = 0; idy < mi_height; idy += bh) {
for (idx = 0; idx < mi_width; idx += bh) {
@@ -3456,7 +3455,7 @@
*sse += pnsse;
*skippable &= pnskip;
- this_rd = VPXMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
+ this_rd = AOMMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
RDCOST(x->rdmult, x->rddiv, 0, *sse));
if (this_rd > ref_best_rd) {
@@ -3479,7 +3478,7 @@
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int super_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skippable, int64_t *sse,
BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3495,7 +3494,7 @@
if (is_inter_block(mbmi) && is_cost_valid) {
int plane;
for (plane = 1; plane < MAX_MB_PLANE; ++plane)
- vp10_subtract_plane(x, bsize, plane);
+ av1_subtract_plane(x, bsize, plane);
}
*rate = 0;
@@ -3533,7 +3532,7 @@
}
static void rd_pick_palette_intra_sbuv(
- VP10_COMP *cpi, MACROBLOCK *x, int dc_mode_cost,
+ AV1_COMP *cpi, MACROBLOCK *x, int dc_mode_cost,
PALETTE_MODE_INFO *palette_mode_info, uint8_t *best_palette_color_map,
PREDICTION_MODE *mode_selected, int64_t *best_rd, int *rate,
int *rate_tokenonly, int64_t *distortion, int *skippable) {
@@ -3557,19 +3556,19 @@
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
#endif // CONFIG_EXT_INTRA
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth) {
- colors_u = vp10_count_colors_highbd(src_u, src_stride, rows, cols,
- cpi->common.bit_depth);
- colors_v = vp10_count_colors_highbd(src_v, src_stride, rows, cols,
- cpi->common.bit_depth);
+ colors_u = av1_count_colors_highbd(src_u, src_stride, rows, cols,
+ cpi->common.bit_depth);
+ colors_v = av1_count_colors_highbd(src_v, src_stride, rows, cols,
+ cpi->common.bit_depth);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
- colors_u = vp10_count_colors(src_u, src_stride, rows, cols);
- colors_v = vp10_count_colors(src_v, src_stride, rows, cols);
-#if CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ colors_u = av1_count_colors(src_u, src_stride, rows, cols);
+ colors_v = av1_count_colors(src_v, src_stride, rows, cols);
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
colors = colors_u > colors_v ? colors_u : colors_v;
if (colors > 1 && colors <= 64) {
@@ -3585,7 +3584,7 @@
uint8_t *const color_map = xd->plane[1].color_index_map;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t *src_u16 = CONVERT_TO_SHORTPTR(src_u);
uint16_t *src_v16 = CONVERT_TO_SHORTPTR(src_v);
if (cpi->common.use_highbitdepth) {
@@ -3594,14 +3593,14 @@
lb_v = src_v16[0];
ub_v = src_v16[0];
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
lb_u = src_u[0];
ub_u = src_u[0];
lb_v = src_v[0];
ub_v = src_v[0];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
mbmi->uv_mode = DC_PRED;
#if CONFIG_EXT_INTRA
@@ -3609,21 +3608,21 @@
#endif // CONFIG_EXT_INTRA
for (r = 0; r < rows; ++r) {
for (c = 0; c < cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth) {
val_u = src_u16[r * src_stride + c];
val_v = src_v16[r * src_stride + c];
data[(r * cols + c) * 2] = val_u;
data[(r * cols + c) * 2 + 1] = val_v;
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
val_u = src_u[r * src_stride + c];
val_v = src_v[r * src_stride + c];
data[(r * cols + c) * 2] = val_u;
data[(r * cols + c) * 2 + 1] = val_v;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (val_u < lb_u)
lb_u = val_u;
else if (val_u > ub_u)
@@ -3641,16 +3640,16 @@
centroids[i * 2] = lb_u + (2 * i + 1) * (ub_u - lb_u) / n / 2;
centroids[i * 2 + 1] = lb_v + (2 * i + 1) * (ub_v - lb_v) / n / 2;
}
- vp10_k_means(data, centroids, color_map, rows * cols, n, 2, max_itr);
+ av1_k_means(data, centroids, color_map, rows * cols, n, 2, max_itr);
pmi->palette_size[1] = n;
for (i = 1; i < 3; ++i) {
for (j = 0; j < n; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth)
pmi->palette_colors[i * PALETTE_MAX_SIZE + j] = clip_pixel_highbd(
(int)centroids[j * 2 + i - 1], cpi->common.bit_depth);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
pmi->palette_colors[i * PALETTE_MAX_SIZE + j] =
clip_pixel((int)centroids[j * 2 + i - 1]);
}
@@ -3661,16 +3660,16 @@
if (this_rate_tokenonly == INT_MAX) continue;
this_rate =
this_rate_tokenonly + dc_mode_cost +
- 2 * cpi->common.bit_depth * n * vp10_cost_bit(128, 0) +
+ 2 * cpi->common.bit_depth * n * av1_cost_bit(128, 0) +
cpi->palette_uv_size_cost[bsize - BLOCK_8X8][n - 2] +
write_uniform_cost(n, color_map[0]) +
- vp10_cost_bit(
- vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 1);
+ av1_cost_bit(
+ av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 1);
for (i = 0; i < rows; ++i) {
for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
- color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, n,
- color_order);
+ color_ctx = av1_get_palette_color_context(color_map, cols, i, j, n,
+ color_order);
for (r = 0; r < n; ++r)
if (color_map[i * cols + j] == color_order[r]) {
color_idx = r;
@@ -3699,7 +3698,7 @@
#if CONFIG_EXT_INTRA
// Return 1 if an ext intra mode is selected; return 0 otherwise.
-static int rd_pick_ext_intra_sbuv(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int rd_pick_ext_intra_sbuv(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
int64_t *best_rd) {
@@ -3711,7 +3710,7 @@
EXT_INTRA_MODE mode;
EXT_INTRA_MODE_INFO ext_intra_mode_info;
- vp10_zero(ext_intra_mode_info);
+ av1_zero(ext_intra_mode_info);
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 1;
mbmi->uv_mode = DC_PRED;
mbmi->palette_mode_info.palette_size[1] = 0;
@@ -3723,7 +3722,7 @@
continue;
this_rate = this_rate_tokenonly +
- vp10_cost_bit(cpi->common.fc->ext_intra_probs[1], 1) +
+ av1_cost_bit(cpi->common.fc->ext_intra_probs[1], 1) +
cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode] +
write_uniform_cost(FILTER_INTRA_MODES, mode);
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
@@ -3750,7 +3749,7 @@
}
}
-static void pick_intra_angle_routine_sbuv(VP10_COMP *cpi, MACROBLOCK *x,
+static void pick_intra_angle_routine_sbuv(AV1_COMP *cpi, MACROBLOCK *x,
int *rate, int *rate_tokenonly,
int64_t *distortion, int *skippable,
int *best_angle_delta,
@@ -3776,7 +3775,7 @@
}
}
-static int rd_pick_intra_angle_sbuv(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int rd_pick_intra_angle_sbuv(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
int rate_overhead, int64_t best_rd) {
@@ -3846,7 +3845,7 @@
}
#endif // CONFIG_EXT_INTRA
-static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_sbuv_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
TX_SIZE max_tx_size) {
@@ -3898,7 +3897,7 @@
MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
if (mbmi->sb_type >= BLOCK_8X8 && mode == DC_PRED &&
ALLOW_FILTER_INTRA_MODES)
- this_rate += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1], 0);
+ this_rate += av1_cost_bit(cpi->common.fc->ext_intra_probs[1], 0);
#else
if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
&this_sse, bsize, best_rd))
@@ -3907,8 +3906,8 @@
#endif // CONFIG_EXT_INTRA
if (cpi->common.allow_screen_content_tools && mbmi->sb_type >= BLOCK_8X8 &&
mode == DC_PRED)
- this_rate += vp10_cost_bit(
- vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 0);
+ this_rate += av1_cost_bit(
+ av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 0);
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
@@ -3964,7 +3963,7 @@
return best_rd;
}
-static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_sbuv_dcpred(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize) {
int64_t unused;
@@ -3977,7 +3976,7 @@
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
+static void choose_intra_uv_mode(AV1_COMP *cpi, MACROBLOCK *const x,
PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
TX_SIZE max_tx_size, int *rate_uv,
int *rate_uv_tokenonly, int64_t *dist_uv,
@@ -3998,7 +3997,7 @@
*mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
}
-static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
+static int cost_mv_ref(const AV1_COMP *cpi, PREDICTION_MODE mode,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
int is_compound,
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -4070,7 +4069,7 @@
}
static int set_and_cost_bmi_mvs(
- VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, int i, PREDICTION_MODE mode,
+ AV1_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, int i, PREDICTION_MODE mode,
int_mv this_mv[2], int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME],
int_mv seg_mvs[TOTAL_REFS_PER_FRAME],
#if CONFIG_EXT_INTER
@@ -4095,29 +4094,28 @@
this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
#if CONFIG_EXT_INTER
if (!cpi->common.allow_high_precision_mv ||
- !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+ !av1_use_mv_hp(&best_ref_mv[0]->as_mv))
lower_mv_precision(&this_mv[0].as_mv, 0);
#endif // CONFIG_EXT_INTER
#if CONFIG_REF_MV
for (idx = 0; idx < 1 + is_compound; ++idx) {
this_mv[idx] = seg_mvs[mbmi->ref_frame[idx]];
- vp10_set_mvcost(x, mbmi->ref_frame[idx]);
+ av1_set_mvcost(x, mbmi->ref_frame[idx]);
thismvcost +=
- vp10_mv_bit_cost(&this_mv[idx].as_mv, &best_ref_mv[idx]->as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT_SUB);
+ av1_mv_bit_cost(&this_mv[idx].as_mv, &best_ref_mv[idx]->as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT_SUB);
}
(void)mvjcost;
(void)mvcost;
#else
- thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
#if !CONFIG_EXT_INTER
if (is_compound) {
this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
- thismvcost +=
- vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
- mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
}
#endif // !CONFIG_EXT_INTER
#endif
@@ -4143,24 +4141,24 @@
this_mv[1].as_int = compound_seg_newmvs[1].as_int;
}
if (!cpi->common.allow_high_precision_mv ||
- !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+ !av1_use_mv_hp(&best_ref_mv[0]->as_mv))
lower_mv_precision(&this_mv[0].as_mv, 0);
if (!cpi->common.allow_high_precision_mv ||
- !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+ !av1_use_mv_hp(&best_ref_mv[1]->as_mv))
lower_mv_precision(&this_mv[1].as_mv, 0);
- thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
- thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
break;
case NEW_NEARMV:
case NEW_NEARESTMV:
this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
if (!cpi->common.allow_high_precision_mv ||
- !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+ !av1_use_mv_hp(&best_ref_mv[0]->as_mv))
lower_mv_precision(&this_mv[0].as_mv, 0);
- thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
break;
case NEAR_NEWMV:
@@ -4168,10 +4166,10 @@
this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
if (!cpi->common.allow_high_precision_mv ||
- !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+ !av1_use_mv_hp(&best_ref_mv[1]->as_mv))
lower_mv_precision(&this_mv[1].as_mv, 0);
- thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
break;
case NEAREST_NEARMV:
case NEAR_NEARESTMV:
@@ -4213,8 +4211,8 @@
mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
else
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, mbmi->sb_type, i);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, mbmi->sb_type, i);
#endif
#if CONFIG_REF_MV && CONFIG_EXT_INTER
return cost_mv_ref(cpi, mode, is_compound, mode_ctx) + thismvcost;
@@ -4223,7 +4221,7 @@
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
}
-static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t encode_inter_mb_segment(AV1_COMP *cpi, MACROBLOCK *x,
int64_t best_yrd, int i, int *labelyrate,
int64_t *distortion, int64_t *sse,
ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
@@ -4238,9 +4236,9 @@
const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
int idx, idy;
const uint8_t *const src =
- &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
uint8_t *const dst =
- &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+ &pd->dst.buf[av1_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
int64_t thisdistortion = 0, thissse = 0;
int thisrate = 0;
TX_SIZE tx_size = mi->mbmi.tx_size;
@@ -4259,24 +4257,23 @@
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
assert(tx_type == DCT_DCT);
- vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
+ av1_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_subtract_block(
- height, width,
- vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
- p->src.stride, dst, pd->dst.stride, xd->bd);
+ aom_highbd_subtract_block(
+ height, width, av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ 8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
} else {
- vpx_subtract_block(height, width, vp10_raster_block_offset_int16(
- BLOCK_8X8, i, p->src_diff),
+ aom_subtract_block(height, width,
+ av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
8, src, p->src.stride, dst, pd->dst.stride);
}
#else
- vpx_subtract_block(height, width,
- vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ aom_subtract_block(height, width,
+ av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
8, src, p->src.stride, dst, pd->dst.stride);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
k = i;
for (idy = 0; idy < height / 4; idy += num_4x4_h) {
@@ -4291,14 +4288,14 @@
block = (i ? 2 : 0);
coeff_ctx = combine_entropy_contexts(*(ta + (k & 1)), *(tl + (k >> 1)));
#if CONFIG_NEW_QUANT
- vp10_xform_quant_fp_nuq(x, 0, block, idy + (i >> 1), idx + (i & 0x01),
- BLOCK_8X8, tx_size, coeff_ctx);
+ av1_xform_quant_fp_nuq(x, 0, block, idy + (i >> 1), idx + (i & 0x01),
+ BLOCK_8X8, tx_size, coeff_ctx);
#else
- vp10_xform_quant(x, 0, block, idy + (i >> 1), idx + (i & 0x01), BLOCK_8X8,
- tx_size, VP10_XFORM_QUANT_FP);
+ av1_xform_quant(x, 0, block, idy + (i >> 1), idx + (i & 0x01), BLOCK_8X8,
+ tx_size, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
if (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0)
- vp10_optimize_b(x, 0, block, tx_size, coeff_ctx);
+ av1_optimize_b(x, 0, block, tx_size, coeff_ctx);
dist_block(cpi, x, 0, block, idy + (i >> 1), idx + (i & 0x1), tx_size,
&dist, &ssz);
thisdistortion += dist;
@@ -4323,7 +4320,7 @@
#endif // CONFIG_VAR_TX
rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion);
rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse);
- rd = VPXMIN(rd1, rd2);
+ rd = AOMMIN(rd1, rd2);
if (rd >= best_yrd) return INT64_MAX;
}
}
@@ -4382,15 +4379,14 @@
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
p->src.buf =
- &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
pd->pre[0].buf =
- &pd->pre[0]
- .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
+ &pd->pre[0].buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
if (has_second_ref(mbmi))
pd->pre[1].buf =
&pd->pre[1]
- .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
+ .buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
}
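
mi_buf_shift above advances the source and prediction pointers to the i-th 4x4 sub-block of an 8x8 partition via av1_raster_block_offset. For BLOCK_8X8 the offset reduces to a two-bit row/column split; a sketch under that assumption (the real helper handles all block sizes through width-log2 lookups):

#include <assert.h>

/* Pixel offset of the i-th 4x4 block (raster order) within an 8x8 block.
 * Assumption: this matches av1_raster_block_offset for BLOCK_8X8 only. */
static int raster_block_offset_8x8(int i, int stride) {
  assert(i >= 0 && i < 4);
  const int y = 4 * (i >> 1); /* row 0 or 4 */
  const int x = 4 * (i & 1);  /* col 0 or 4 */
  return y * stride + x;
}
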
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -4404,7 +4400,7 @@
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive, then clean up or remove
static int check_best_zero_mv(
- const VP10_COMP *cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
+ const AV1_COMP *cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
#if CONFIG_REF_MV && CONFIG_EXT_INTER
const int16_t compound_mode_context[TOTAL_REFS_PER_FRAME],
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -4421,7 +4417,7 @@
frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
#if CONFIG_REF_MV
int16_t rfc =
- vp10_mode_context_analyzer(mode_context, ref_frames, bsize, block);
+ av1_mode_context_analyzer(mode_context, ref_frames, bsize, block);
#else
int16_t rfc = mode_context[ref_frames[0]];
#endif
@@ -4506,14 +4502,14 @@
return 1;
}
-static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+static void joint_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
int_mv *frame_mv, int mi_row, int mi_col,
#if CONFIG_EXT_INTER
int_mv *ref_mv_sub8x8[2],
#endif
int_mv single_newmv[TOTAL_REFS_PER_FRAME],
int *rate_mv, const int block) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
MACROBLOCKD *xd = &x->e_mbd;
@@ -4536,17 +4532,17 @@
struct buf_2d backup_yv12[2][MAX_MB_PLANE];
int last_besterr[2] = { INT_MAX, INT_MAX };
const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
- vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
- vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
+ av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
+ av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
};
// Prediction buffer from second frame.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
uint8_t *second_pred;
#else
DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (ref = 0; ref < 2; ++ref) {
#if CONFIG_EXT_INTER
@@ -4563,8 +4559,8 @@
// motion search code to be used without additional modifications.
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[ref][i] = xd->plane[i].pre[ref];
- vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
- NULL);
+ av1_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
+ NULL);
}
frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
@@ -4572,13 +4568,13 @@
// Since we have scaled the reference frames to match the size of the current
// frame, we must use a unit scaling factor during mode selection.
-#if CONFIG_VP9_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
- cm->height, cm->use_highbitdepth);
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ cm->height, cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
- cm->height);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ cm->height);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Run the joint search iteratively over each reference frame, breaking out
// of the search loop once it can no longer find a better mv.
@@ -4610,30 +4606,30 @@
#endif
// Get the prediction block from the 'other' reference frame.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
- vp10_highbd_build_inter_predictor(
+ av1_highbd_build_inter_predictor(
ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
&frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, interp_filter,
MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
} else {
second_pred = (uint8_t *)second_pred_alloc_16;
- vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
- second_pred, pw, &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0, interp_filter, MV_PRECISION_Q3,
- mi_col * MI_SIZE, mi_row * MI_SIZE);
+ av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ second_pred, pw, &frame_mv[refs[!id]].as_mv,
+ &sf, pw, ph, 0, interp_filter, MV_PRECISION_Q3,
+ mi_col * MI_SIZE, mi_row * MI_SIZE);
}
#else
- vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
- second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
- pw, ph, 0, interp_filter, MV_PRECISION_Q3,
- mi_col * MI_SIZE, mi_row * MI_SIZE);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
+ pw, ph, 0, interp_filter, MV_PRECISION_Q3,
+ mi_col * MI_SIZE, mi_row * MI_SIZE);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Do compound motion search on the current reference frame.
if (id) xd->plane[0].pre[0] = ref_yv12[id];
- vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
+ av1_set_mv_search_range(x, &ref_mv[id].as_mv);
// Use the mv result from the single mode as mv predictor.
*best_mv = frame_mv[refs[id]].as_mv;
@@ -4642,16 +4638,16 @@
best_mv->row >>= 3;
#if CONFIG_REF_MV
- vp10_set_mvcost(x, refs[id]);
+ av1_set_mvcost(x, refs[id]);
#endif
// Small-range full-pixel motion search.
bestsme =
- vp10_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
- &ref_mv[id].as_mv, second_pred);
+ av1_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
+ &ref_mv[id].as_mv, second_pred);
if (bestsme < INT_MAX)
- bestsme = vp10_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
- second_pred, &cpi->fn_ptr[bsize], 1);
+ bestsme = av1_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
+ second_pred, &cpi->fn_ptr[bsize], 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -4678,8 +4674,8 @@
// If bsize < BLOCK_8X8, adjust pred pointer for this block
if (bsize < BLOCK_8X8)
pd->pre[0].buf =
- &pd->pre[0].buf[(vp10_raster_block_offset(BLOCK_8X8, block,
- pd->pre[0].stride))
+ &pd->pre[0].buf[(av1_raster_block_offset(BLOCK_8X8, block,
+ pd->pre[0].stride))
<< 3];
bestsme = cpi->find_fractional_mv_step(
@@ -4721,25 +4717,25 @@
xd->plane[i].pre[ref] = backup_yv12[ref][i];
}
#if CONFIG_REF_MV
- vp10_set_mvcost(x, refs[ref]);
+ av1_set_mvcost(x, refs[ref]);
#endif
#if CONFIG_EXT_INTER
if (bsize >= BLOCK_8X8)
#endif // CONFIG_EXT_INTER
- *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ *rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
#if CONFIG_EXT_INTER
else
- *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
- &ref_mv_sub8x8[ref]->as_mv, x->nmvjointcost,
- x->mvcost, MV_COST_WEIGHT);
+ *rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+ &ref_mv_sub8x8[ref]->as_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
#endif // CONFIG_EXT_INTER
}
}
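
joint_motion_search refines a compound motion pair by alternating: hold one reference's mv fixed, synthesize that reference's prediction as second_pred, then run a small refining search on the other reference against it, stopping once neither side improves. The control flow in outline, with a hypothetical callback standing in for the av1_build_inter_predictor / av1_refining_search_8p_c pair:

#include <limits.h>

typedef struct { int row, col; } MV;

/* Caller-supplied step: build the prediction from the 'other' reference at
 * mv[!id], search reference 'id' against it, refine mv[id] in place, and
 * return the best error. Hypothetical callback, not libaom API. */
typedef int (*search_vs_second_pred_fn)(int id, MV mv[2], void *ctx);

static void alternating_joint_search(MV mv[2], int max_iters,
                                     search_vs_second_pred_fn search,
                                     void *ctx) {
  int last_err[2] = { INT_MAX, INT_MAX };
  for (int it = 0; it < max_iters; ++it) {
    int improved = 0;
    for (int id = 0; id < 2; ++id) {
      const int err = search(id, mv, ctx); /* refines mv[id] in place */
      if (err < last_err[id]) {
        last_err[id] = err;
        improved = 1;
      }
    }
    if (!improved) break; /* neither reference found a better mv */
  }
}
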
static int64_t rd_pick_best_sub8x8_mode(
- VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+ AV1_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
int mvthresh,
@@ -4761,7 +4757,7 @@
int k, br = 0, idx, idy;
int64_t bd = 0, block_sse = 0;
PREDICTION_MODE this_mode;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &xd->plane[0];
const int label_count = 4;
@@ -4783,7 +4779,7 @@
mbmi->tx_size = TX_4X4;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
- vp10_zero(*bsi);
+ av1_zero(*bsi);
bsi->segment_rd = best_rd;
bsi->ref_mv[0] = best_ref_mv;
@@ -4838,19 +4834,19 @@
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
#if CONFIG_EXT_INTER
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
- vp10_update_mv_context(xd, mi, frame, mv_ref_list, i, mi_row, mi_col,
- NULL);
+ av1_update_mv_context(xd, mi, frame, mv_ref_list, i, mi_row, mi_col,
+ NULL);
#endif // CONFIG_EXT_INTER
frame_mv[ZEROMV][frame].as_int = 0;
- vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
+ av1_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
#if CONFIG_REF_MV
- ref_mv_stack[ref], &ref_mv_count[ref],
+ ref_mv_stack[ref], &ref_mv_count[ref],
#endif
#if CONFIG_EXT_INTER
- mv_ref_list,
+ mv_ref_list,
#endif // CONFIG_EXT_INTER
- &frame_mv[NEARESTMV][frame],
- &frame_mv[NEARMV][frame]);
+ &frame_mv[NEARESTMV][frame],
+ &frame_mv[NEARMV][frame]);
#if CONFIG_REF_MV
tmp_ref_mv[ref] = frame_mv[NEARESTMV][mbmi->ref_frame[ref]];
@@ -4862,9 +4858,8 @@
#if CONFIG_EXT_INTER
mv_ref_list[0].as_int = frame_mv[NEARESTMV][frame].as_int;
mv_ref_list[1].as_int = frame_mv[NEARMV][frame].as_int;
- vp10_find_best_ref_mvs(cm->allow_high_precision_mv, mv_ref_list,
- &ref_mvs_sub8x8[0][ref],
- &ref_mvs_sub8x8[1][ref]);
+ av1_find_best_ref_mvs(cm->allow_high_precision_mv, mv_ref_list,
+ &ref_mvs_sub8x8[0][ref], &ref_mvs_sub8x8[1][ref]);
if (has_second_rf) {
frame_mv[ZERO_ZEROMV][frame].as_int = 0;
@@ -4996,7 +4991,7 @@
#if CONFIG_EXT_INTER
have_newmv_in_inter_mode(this_mode) &&
(seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_int == INVALID_MV ||
- vp10_use_mv_hp(&bsi->ref_mv[0]->as_mv) == 0)
+ av1_use_mv_hp(&bsi->ref_mv[0]->as_mv) == 0)
#else
this_mode == NEWMV &&
(seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV ||
@@ -5035,14 +5030,14 @@
max_mv = x->max_mv_context[mbmi->ref_frame[0]];
else
max_mv =
- VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
+ AOMMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and the best ref mvs of the current block for
// the given reference.
step_param =
- (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
+ (av1_init_search_range(max_mv) + cpi->mv_step_param) / 2;
} else {
step_param = cpi->mv_step_param;
}
@@ -5058,20 +5053,20 @@
if (cpi->sf.adaptive_motion_search) {
mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3;
mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3;
- step_param = VPXMAX(step_param, 8);
+ step_param = AOMMAX(step_param, 8);
}
// adjust src pointer for this block
mi_buf_shift(x, i);
- vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
+ av1_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
x->best_mv.as_int = x->second_best_mv.as_int = INVALID_MV;
#if CONFIG_REF_MV
- vp10_set_mvcost(x, mbmi->ref_frame[0]);
+ av1_set_mvcost(x, mbmi->ref_frame[0]);
#endif
- bestsme = vp10_full_pixel_search(
+ bestsme = av1_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, sadpb,
cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
&bsi->ref_mv[0]->as_mv, INT_MAX, 1);
@@ -5105,8 +5100,8 @@
// adjust pred pointer for this block
pd->pre[0].buf =
- &pd->pre[0].buf[(vp10_raster_block_offset(BLOCK_8X8, i,
- pd->pre[0].stride))
+ &pd->pre[0].buf[(av1_raster_block_offset(BLOCK_8X8, i,
+ pd->pre[0].stride))
<< 3];
best_mv_var = cpi->find_fractional_mv_step(
@@ -5122,10 +5117,10 @@
int this_var;
MV best_mv = x->best_mv.as_mv;
const MV ref_mv = bsi->ref_mv[0]->as_mv;
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
x->best_mv = x->second_best_mv;
if (x->best_mv.as_mv.row * 8 <= maxr &&
@@ -5504,18 +5499,18 @@
*returntotrate = bsi->r;
*returndistortion = bsi->d;
*returnyrate = bsi->segment_yrate;
- *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
+ *skippable = av1_is_skippable_in_plane(x, BLOCK_8X8, 0);
*psse = bsi->sse;
mbmi->mode = bsi->modes[3];
return bsi->segment_rd;
}
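
estimate_ref_frame_costs below prices each reference-frame choice by summing av1_cost_bit terms down the prediction tree. av1_cost_bit(p, b) is the entropy cost of coding bit b when a zero has probability p/256; the real code reads a precomputed table, and the 512-units-per-bit scaling assumed below (so that cost_bit_sketch(128, 0) == 512) is an inference from how these costs feed RDCOST:

#include <math.h>
#include <stdint.h>

/* Entropy cost of `bit` given an 8-bit probability of zero. aom_prob
 * values lie in 1..255, so p stays strictly positive. Sketch only;
 * the library uses a lookup table rather than log2 at runtime. */
static int cost_bit_sketch(uint8_t prob, int bit) {
  const double p = (bit ? 256 - prob : prob) / 256.0;
  return (int)(-log2(p) * 512.0 + 0.5);
}
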
-static void estimate_ref_frame_costs(const VP10_COMMON *cm,
+static void estimate_ref_frame_costs(const AV1_COMMON *cm,
const MACROBLOCKD *xd, int segment_id,
unsigned int *ref_costs_single,
unsigned int *ref_costs_comp,
- vpx_prob *comp_mode_p) {
+ aom_prob *comp_mode_p) {
int seg_ref_active =
segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
if (seg_ref_active) {
@@ -5524,28 +5519,28 @@
memset(ref_costs_comp, 0, TOTAL_REFS_PER_FRAME * sizeof(*ref_costs_comp));
*comp_mode_p = 128;
} else {
- vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
- vpx_prob comp_inter_p = 128;
+ aom_prob intra_inter_p = av1_get_intra_inter_prob(cm, xd);
+ aom_prob comp_inter_p = 128;
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
+ comp_inter_p = av1_get_reference_mode_prob(cm, xd);
*comp_mode_p = comp_inter_p;
} else {
*comp_mode_p = 128;
}
- ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
+ ref_costs_single[INTRA_FRAME] = av1_cost_bit(intra_inter_p, 0);
if (cm->reference_mode != COMPOUND_REFERENCE) {
- vpx_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
- vpx_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
+ aom_prob ref_single_p1 = av1_get_pred_prob_single_ref_p1(cm, xd);
+ aom_prob ref_single_p2 = av1_get_pred_prob_single_ref_p2(cm, xd);
#if CONFIG_EXT_REFS
- vpx_prob ref_single_p3 = vp10_get_pred_prob_single_ref_p3(cm, xd);
- vpx_prob ref_single_p4 = vp10_get_pred_prob_single_ref_p4(cm, xd);
- vpx_prob ref_single_p5 = vp10_get_pred_prob_single_ref_p5(cm, xd);
+ aom_prob ref_single_p3 = av1_get_pred_prob_single_ref_p3(cm, xd);
+ aom_prob ref_single_p4 = av1_get_pred_prob_single_ref_p4(cm, xd);
+ aom_prob ref_single_p5 = av1_get_pred_prob_single_ref_p5(cm, xd);
#endif // CONFIG_EXT_REFS
- unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+ unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
ref_costs_single[LAST_FRAME] =
#if CONFIG_EXT_REFS
@@ -5556,33 +5551,33 @@
ref_costs_single[ALTREF_FRAME] = base_cost;
#if CONFIG_EXT_REFS
- ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
- ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p1, 0);
- ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p1, 0);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 0);
- ref_costs_single[BWDREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
- ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+ ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p1, 0);
+ ref_costs_single[LAST2_FRAME] += av1_cost_bit(ref_single_p1, 0);
+ ref_costs_single[LAST3_FRAME] += av1_cost_bit(ref_single_p1, 0);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p1, 0);
+ ref_costs_single[BWDREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
+ ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
- ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p3, 0);
- ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p3, 0);
- ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p3, 1);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p3, 1);
+ ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p3, 0);
+ ref_costs_single[LAST2_FRAME] += av1_cost_bit(ref_single_p3, 0);
+ ref_costs_single[LAST3_FRAME] += av1_cost_bit(ref_single_p3, 1);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p3, 1);
- ref_costs_single[BWDREF_FRAME] += vp10_cost_bit(ref_single_p2, 0);
- ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+ ref_costs_single[BWDREF_FRAME] += av1_cost_bit(ref_single_p2, 0);
+ ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p2, 1);
- ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p4, 0);
- ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p4, 1);
+ ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p4, 0);
+ ref_costs_single[LAST2_FRAME] += av1_cost_bit(ref_single_p4, 1);
- ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p5, 0);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p5, 1);
+ ref_costs_single[LAST3_FRAME] += av1_cost_bit(ref_single_p5, 0);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p5, 1);
#else
- ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
- ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+ ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p1, 0);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p1, 1);
+ ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
- ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p2, 0);
+ ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p2, 1);
#endif // CONFIG_EXT_REFS
} else {
ref_costs_single[LAST_FRAME] = 512;
@@ -5596,14 +5591,14 @@
}
if (cm->reference_mode != SINGLE_REFERENCE) {
- vpx_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
+ aom_prob ref_comp_p = av1_get_pred_prob_comp_ref_p(cm, xd);
#if CONFIG_EXT_REFS
- vpx_prob ref_comp_p1 = vp10_get_pred_prob_comp_ref_p1(cm, xd);
- vpx_prob ref_comp_p2 = vp10_get_pred_prob_comp_ref_p2(cm, xd);
- vpx_prob bwdref_comp_p = vp10_get_pred_prob_comp_bwdref_p(cm, xd);
+ aom_prob ref_comp_p1 = av1_get_pred_prob_comp_ref_p1(cm, xd);
+ aom_prob ref_comp_p2 = av1_get_pred_prob_comp_ref_p2(cm, xd);
+ aom_prob bwdref_comp_p = av1_get_pred_prob_comp_bwdref_p(cm, xd);
#endif // CONFIG_EXT_REFS
- unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+ unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
ref_costs_comp[LAST_FRAME] =
#if CONFIG_EXT_REFS
@@ -5616,24 +5611,24 @@
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
- ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p, 0);
- ref_costs_comp[LAST2_FRAME] += vp10_cost_bit(ref_comp_p, 0);
- ref_costs_comp[LAST3_FRAME] += vp10_cost_bit(ref_comp_p, 1);
- ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+ ref_costs_comp[LAST_FRAME] += av1_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[LAST2_FRAME] += av1_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[LAST3_FRAME] += av1_cost_bit(ref_comp_p, 1);
+ ref_costs_comp[GOLDEN_FRAME] += av1_cost_bit(ref_comp_p, 1);
- ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p1, 1);
- ref_costs_comp[LAST2_FRAME] += vp10_cost_bit(ref_comp_p1, 0);
+ ref_costs_comp[LAST_FRAME] += av1_cost_bit(ref_comp_p1, 1);
+ ref_costs_comp[LAST2_FRAME] += av1_cost_bit(ref_comp_p1, 0);
- ref_costs_comp[LAST3_FRAME] += vp10_cost_bit(ref_comp_p2, 0);
- ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p2, 1);
+ ref_costs_comp[LAST3_FRAME] += av1_cost_bit(ref_comp_p2, 0);
+ ref_costs_comp[GOLDEN_FRAME] += av1_cost_bit(ref_comp_p2, 1);
// NOTE(zoeliu): BWDREF and ALTREF each add an extra cost by coding 1
// more bit.
- ref_costs_comp[BWDREF_FRAME] += vp10_cost_bit(bwdref_comp_p, 0);
- ref_costs_comp[ALTREF_FRAME] += vp10_cost_bit(bwdref_comp_p, 1);
+ ref_costs_comp[BWDREF_FRAME] += av1_cost_bit(bwdref_comp_p, 0);
+ ref_costs_comp[ALTREF_FRAME] += av1_cost_bit(bwdref_comp_p, 1);
#else
- ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p, 0);
- ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+ ref_costs_comp[LAST_FRAME] += av1_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[GOLDEN_FRAME] += av1_cost_bit(ref_comp_p, 1);
#endif // CONFIG_EXT_REFS
} else {
ref_costs_comp[LAST_FRAME] = 512;
@@ -5667,12 +5662,12 @@
}
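The reference costs above are composed by walking a binary probability tree: every reference pays the shared intra/inter bit, then one bit cost per branch on its path (p1, p2, ...). A minimal sketch of that composition, assuming the usual 1/512-bit cost scale; cost_bit() below is a hypothetical stand-in for av1_cost_bit, which reads a precomputed table instead of calling log2, and the probabilities are made up:
#include <math.h>
#include <stdio.h>
/* Hypothetical stand-in for av1_cost_bit(): cost of coding |bit| when the
 * probability of a zero is p/256, in (assumed) units of 1/512 bit. */
static int cost_bit(int p, int bit) {
  const double prob = bit ? (256 - p) / 256.0 : p / 256.0;
  return (int)(-512.0 * log2(prob) + 0.5);
}
int main(void) {
  /* Non-EXT_REFS tree: p1 splits LAST from {GOLDEN, ALTREF}, then p2
   * splits GOLDEN from ALTREF, mirroring the additions above. */
  const int intra_inter_p = 200, p1 = 150, p2 = 120;
  const int base = cost_bit(intra_inter_p, 1); /* "this block is inter" */
  printf("LAST   %d\n", base + cost_bit(p1, 0));
  printf("GOLDEN %d\n", base + cost_bit(p1, 1) + cost_bit(p2, 0));
  printf("ALTREF %d\n", base + cost_bit(p1, 1) + cost_bit(p2, 1));
  return 0;
}
References deeper in the tree cost more bits, which is exactly the BWDREF/ALTREF "one more bit" note above.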
static void setup_buffer_inter(
- VP10_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
+ AV1_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
BLOCK_SIZE block_size, int mi_row, int mi_col,
int_mv frame_nearest_mv[TOTAL_REFS_PER_FRAME],
int_mv frame_near_mv[TOTAL_REFS_PER_FRAME],
struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE]) {
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mi = xd->mi[0];
@@ -5684,10 +5679,10 @@
// TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
// use the UV scaling factors.
- vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
+ av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
// Gets an initial list of candidate vectors from neighbours and orders them
- vp10_find_mv_refs(
+ av1_find_mv_refs(
cm, xd, mi, ref_frame,
#if CONFIG_REF_MV
&mbmi_ext->ref_mv_count[ref_frame], mbmi_ext->ref_mv_stack[ref_frame],
@@ -5698,26 +5693,26 @@
candidates, mi_row, mi_col, NULL, NULL, mbmi_ext->mode_context);
// Candidate refinement carried out at encoder and decoder
- vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
- &frame_nearest_mv[ref_frame],
- &frame_near_mv[ref_frame]);
+ av1_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
+ &frame_nearest_mv[ref_frame],
+ &frame_near_mv[ref_frame]);
// Further refinement that is encode side only to test the top few candidates
// in full and choose the best as the centre point for subsequent searches.
// The current implementation doesn't support scaling.
- if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
- vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
- block_size);
+ if (!av1_is_scaled(sf) && block_size >= BLOCK_8X8)
+ av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+ block_size);
}
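// To summarize the call sequence above (behaviour as described by the
// surrounding comments, not re-derived here): av1_find_mv_refs collects
// candidate vectors from neighbouring blocks (and, under CONFIG_REF_MV,
// fills the ranked ref_mv_stack); av1_find_best_ref_mvs refines them into
// the nearest/near candidates on both encoder and decoder; av1_mv_pred is
// encoder-side only and merely picks the centre for the full search, so
// skipping it cannot change the bitstream.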
-static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, int mi_row, int mi_col,
+static void single_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
#if CONFIG_EXT_INTER
int ref_idx, int mv_idx,
#endif // CONFIG_EXT_INTER
int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
int bestsme = INT_MAX;
@@ -5740,7 +5735,7 @@
int cost_list[5];
const YV12_BUFFER_CONFIG *scaled_ref_frame =
- vp10_get_scaled_ref_frame(cpi, ref);
+ av1_get_scaled_ref_frame(cpi, ref);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -5748,7 +5743,7 @@
pred_mv[2] = x->pred_mv[ref];
#if CONFIG_REF_MV
- vp10_set_mvcost(x, ref);
+ av1_set_mvcost(x, ref);
#endif
if (scaled_ref_frame) {
@@ -5759,17 +5754,17 @@
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[i] = xd->plane[i].pre[ref_idx];
- vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+ av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
}
// Work out the size of the first step in the mv step search.
- // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
+ // 0 here is maximum length first step. 1 is AOMMAX >> 1 etc.
if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
step_param =
- (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+ (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
2;
} else {
step_param = cpi->mv_step_param;
@@ -5778,8 +5773,8 @@
if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size) {
int boffset =
2 * (b_width_log2_lookup[cm->sb_size] -
- VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
- step_param = VPXMAX(step_param, boffset);
+ AOMMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+ step_param = AOMMAX(step_param, boffset);
}
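// Rough worked example for the clamp above (lookup values assumed:
// b_width_log2_lookup holds log2 of the block width in 4-pel units, so
// BLOCK_64X64 -> 4 and BLOCK_16X16 -> 2): boffset = 2 * (4 - 2) = 4, and
// a configured step_param of 2 is raised to 4, i.e. small blocks inside a
// large superblock start the diamond search with a smaller first step.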
if (cpi->sf.adaptive_motion_search) {
@@ -5809,7 +5804,7 @@
}
}
- vp10_set_mv_search_range(x, &ref_mv);
+ av1_set_mv_search_range(x, &ref_mv);
mvp_full = pred_mv[x->mv_best_ref_index[ref]];
@@ -5818,9 +5813,9 @@
x->best_mv.as_int = x->second_best_mv.as_int = INVALID_MV;
- bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
- cond_cost_list(cpi, cost_list), &ref_mv,
- INT_MAX, 1);
+ bestsme = av1_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
+ cond_cost_list(cpi, cost_list), &ref_mv,
+ INT_MAX, 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -5854,10 +5849,10 @@
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph, 1);
if (try_second) {
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
+ const int minc = AOMMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
+ const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
+ const int minr = AOMMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
+ const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
int this_var;
MV best_mv = x->best_mv.as_mv;
@@ -5887,8 +5882,8 @@
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0, 0);
}
}
- *rate_mv = vp10_mv_bit_cost(&x->best_mv.as_mv, &ref_mv, x->nmvjointcost,
- x->mvcost, MV_COST_WEIGHT);
+ *rate_mv = av1_mv_bit_cost(&x->best_mv.as_mv, &ref_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = x->best_mv.as_mv;
@@ -5910,7 +5905,7 @@
}
#if CONFIG_OBMC
-static void single_motion_search_obmc(VP10_COMP *cpi, MACROBLOCK *x,
+static void single_motion_search_obmc(AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int mi_row, int mi_col,
const int32_t *wsrc, const int32_t *mask,
#if CONFIG_EXT_INTER
@@ -5919,7 +5914,7 @@
int_mv *tmp_mv, int_mv pred_mv,
int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
int bestsme = INT_MAX;
@@ -5941,10 +5936,10 @@
int tmp_row_max = x->mv_row_max;
const YV12_BUFFER_CONFIG *scaled_ref_frame =
- vp10_get_scaled_ref_frame(cpi, ref);
+ av1_get_scaled_ref_frame(cpi, ref);
#if CONFIG_REF_MV
- vp10_set_mvcost(x, ref);
+ av1_set_mvcost(x, ref);
#endif
if (scaled_ref_frame) {
@@ -5955,17 +5950,17 @@
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[i] = xd->plane[i].pre[ref_idx];
- vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+ av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
}
// Work out the size of the first step in the mv step search.
- // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
+ // 0 here is maximum length first step. 1 is AOMMAX >> 1 etc.
if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
step_param =
- (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+ (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
2;
} else {
step_param = cpi->mv_step_param;
@@ -5974,8 +5969,8 @@
if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size) {
int boffset =
2 * (b_width_log2_lookup[cm->sb_size] -
- VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
- step_param = VPXMAX(step_param, boffset);
+ AOMMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+ step_param = AOMMAX(step_param, boffset);
}
if (cpi->sf.adaptive_motion_search) {
@@ -6005,13 +6000,13 @@
}
}
- vp10_set_mv_search_range(x, &ref_mv);
+ av1_set_mv_search_range(x, &ref_mv);
mvp_full = pred_mv.as_mv;
mvp_full.col >>= 3;
mvp_full.row >>= 3;
- bestsme = vp10_obmc_full_pixel_diamond(
+ bestsme = av1_obmc_full_pixel_diamond(
cpi, x, wsrc, mask, &mvp_full, step_param, sadpb,
MAX_MVSEARCH_STEPS - 1 - step_param, 1, &cpi->fn_ptr[bsize], &ref_mv,
&tmp_mv->as_mv, ref_idx);
@@ -6023,15 +6018,15 @@
if (bestsme < INT_MAX) {
int dis;
- vp10_find_best_obmc_sub_pixel_tree_up(
+ av1_find_best_obmc_sub_pixel_tree_up(
cpi, x, wsrc, mask, mi_row, mi_col, &tmp_mv->as_mv, &ref_mv,
cm->allow_high_precision_mv, x->errorperbit, &cpi->fn_ptr[bsize],
cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], ref_idx,
cpi->sf.use_upsampled_references);
}
- *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
- x->mvcost, MV_COST_WEIGHT);
+ *rate_mv = av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
if (scaled_ref_frame) {
int i;
@@ -6042,13 +6037,13 @@
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
-static void do_masked_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void do_masked_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
const uint8_t *mask, int mask_stride,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int_mv *tmp_mv, int *rate_mv, int ref_idx,
int mv_idx) {
MACROBLOCKD *xd = &x->e_mbd;
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
int bestsme = INT_MAX;
@@ -6064,7 +6059,7 @@
int tmp_row_max = x->mv_row_max;
const YV12_BUFFER_CONFIG *scaled_ref_frame =
- vp10_get_scaled_ref_frame(cpi, ref);
+ av1_get_scaled_ref_frame(cpi, ref);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -6072,7 +6067,7 @@
pred_mv[2] = x->pred_mv[ref];
#if CONFIG_REF_MV
- vp10_set_mvcost(x, ref);
+ av1_set_mvcost(x, ref);
#endif
if (scaled_ref_frame) {
@@ -6083,10 +6078,10 @@
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[i] = xd->plane[i].pre[ref_idx];
- vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+ av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
}
- vp10_set_mv_search_range(x, &ref_mv);
+ av1_set_mv_search_range(x, &ref_mv);
// Work out the size of the first step in the mv step search.
// 0 here is maximum length first step. 1 is MAX >> 1 etc.
@@ -6095,7 +6090,7 @@
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
step_param =
- (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+ (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
2;
} else {
step_param = cpi->mv_step_param;
@@ -6105,8 +6100,8 @@
if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size && cm->show_frame) {
int boffset =
2 * (b_width_log2_lookup[cm->sb_size] -
- VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
- step_param = VPXMAX(step_param, boffset);
+ AOMMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+ step_param = AOMMAX(step_param, boffset);
}
if (cpi->sf.adaptive_motion_search) {
@@ -6141,7 +6136,7 @@
mvp_full.col >>= 3;
mvp_full.row >>= 3;
- bestsme = vp10_masked_full_pixel_diamond(
+ bestsme = av1_masked_full_pixel_diamond(
cpi, x, mask, mask_stride, &mvp_full, step_param, sadpb,
MAX_MVSEARCH_STEPS - 1 - step_param, 1, &cpi->fn_ptr[bsize], &ref_mv,
&tmp_mv->as_mv, ref_idx);
@@ -6153,15 +6148,15 @@
if (bestsme < INT_MAX) {
int dis; /* TODO: use dis in distortion calculation later. */
- vp10_find_best_masked_sub_pixel_tree_up(
+ av1_find_best_masked_sub_pixel_tree_up(
cpi, x, mask, mask_stride, mi_row, mi_col, &tmp_mv->as_mv, &ref_mv,
cm->allow_high_precision_mv, x->errorperbit, &cpi->fn_ptr[bsize],
cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], ref_idx,
cpi->sf.use_upsampled_references);
}
- *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
- x->mvcost, MV_COST_WEIGHT);
+ *rate_mv = av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
if (cpi->sf.adaptive_motion_search && cm->show_frame)
x->pred_mv[ref] = tmp_mv->as_mv;
@@ -6173,7 +6168,7 @@
}
}
-static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
+static void do_masked_motion_search_indexed(AV1_COMP *cpi, MACROBLOCK *x,
int wedge_index, int wedge_sign,
BLOCK_SIZE bsize, int mi_row,
int mi_col, int_mv *tmp_mv,
@@ -6185,7 +6180,7 @@
BLOCK_SIZE sb_type = mbmi->sb_type;
const uint8_t *mask;
const int mask_stride = 4 * num_4x4_blocks_wide_lookup[bsize];
- mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+ mask = av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
if (which == 0 || which == 2)
do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
@@ -6193,7 +6188,7 @@
if (which == 1 || which == 2) {
// get the negative mask
- mask = vp10_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
+ mask = av1_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
&tmp_mv[1], &rate_mv[1], 1, mv_idx[1]);
}
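The two complementary soft masks requested above always sum to the full weight, so flipping wedge_sign is equivalent to swapping the two predictors. A minimal sketch of the blend, assuming WEDGE_WEIGHT_BITS == 6 as in the usual definition (the names here are illustrative, not the library's):
#include <stdint.h>
#include <stdio.h>
#define WEDGE_WEIGHT_BITS 6 /* assumed */
/* Blend one pixel of two predictors under soft-mask weight m in [0, 64]. */
static uint8_t wedge_blend(uint8_t p0, uint8_t p1, int m) {
  const int w = 1 << WEDGE_WEIGHT_BITS;
  return (uint8_t)((m * p0 + (w - m) * p1 + w / 2) >> WEDGE_WEIGHT_BITS);
}
int main(void) {
  /* Weight 48 on p0 equals weight 64 - 48 = 16 on p1 with the arguments
   * swapped, which is why the second search reuses the same wedge index
   * with !wedge_sign. Both calls print 175. */
  printf("%d %d\n", wedge_blend(200, 100, 48), wedge_blend(100, 200, 16));
  return 0;
}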
@@ -6207,7 +6202,7 @@
// However, once established, that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
-static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
+static int discount_newmv_test(const AV1_COMP *cpi, int this_mode,
int_mv this_mv,
int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME],
int ref_frame) {
@@ -6219,9 +6214,9 @@
(mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
}
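When discount_newmv_test() fires, the caller (seen later in handle_inter_mode) charges only a fraction of the true vector cost so that a plausibly useful new vector is not rejected outright. A sketch, with NEW_MV_DISCOUNT_FACTOR assumed to be 8 as in the usual definition:
#include <stdio.h>
#define AOMMAX(a, b) ((a) > (b) ? (a) : (b))
#define NEW_MV_DISCOUNT_FACTOR 8 /* assumed */
int main(void) {
  int rate_mv = 100; /* made-up signalling cost for the new vector */
  rate_mv = AOMMAX(rate_mv / NEW_MV_DISCOUNT_FACTOR, 1); /* never zero */
  printf("discounted rate_mv = %d\n", rate_mv); /* 12 */
  return 0;
}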
-#define LEFT_TOP_MARGIN ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
+#define LEFT_TOP_MARGIN ((AOM_ENC_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
#define RIGHT_BOTTOM_MARGIN \
- ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
+ ((AOM_ENC_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
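// Both margins are (border width - interpolation extension) in whole
// pixels, shifted left by 3 to express them in the 1/8-pel units that
// motion vectors use, so clamp_mv2 below works directly on mv components.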
// TODO(jingning): this mv clamping function should be block size dependent.
static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
@@ -6232,7 +6227,7 @@
}
#if CONFIG_EXT_INTER
-static int estimate_wedge_sign(const VP10_COMP *cpi, const MACROBLOCK *x,
+static int estimate_wedge_sign(const AV1_COMP *cpi, const MACROBLOCK *x,
const BLOCK_SIZE bsize, const uint8_t *pred0,
int stride0, const uint8_t *pred1, int stride1) {
const struct macroblock_plane *const p = &x->plane[0];
@@ -6244,12 +6239,12 @@
uint32_t esq[2][4], var;
int64_t tl, br;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
pred0 = CONVERT_TO_BYTEPTR(pred0);
pred1 = CONVERT_TO_BYTEPTR(pred1);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
var = cpi->fn_ptr[f_index].vf(src, src_stride, pred0, stride0, &esq[0][0]);
var = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred0 + bw / 2,
@@ -6279,11 +6274,11 @@
#if !CONFIG_DUAL_FILTER
static INTERP_FILTER predict_interp_filter(
- const VP10_COMP *cpi, const MACROBLOCK *x, const BLOCK_SIZE bsize,
+ const AV1_COMP *cpi, const MACROBLOCK *x, const BLOCK_SIZE bsize,
const int mi_row, const int mi_col,
INTERP_FILTER (*single_filter)[TOTAL_REFS_PER_FRAME]) {
INTERP_FILTER best_filter = SWITCHABLE;
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
const MACROBLOCKD *xd = &x->e_mbd;
int bsl = mi_width_log2_lookup[bsize];
int pred_filter_search =
@@ -6383,7 +6378,7 @@
best_filter = EIGHTTAP_REGULAR;
}
#if CONFIG_EXT_INTERP
- else if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
+ else if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
best_filter = EIGHTTAP_REGULAR;
}
#endif
@@ -6394,7 +6389,7 @@
#if CONFIG_EXT_INTER
// Choose the best wedge index and sign
-static int64_t pick_wedge(const VP10_COMP *const cpi, const MACROBLOCK *const x,
+static int64_t pick_wedge(const AV1_COMP *const cpi, const MACROBLOCK *const x,
const BLOCK_SIZE bsize, const uint8_t *const p0,
const uint8_t *const p1, int *const best_wedge_sign,
int *const best_wedge_index) {
@@ -6411,12 +6406,12 @@
int wedge_types = (1 << get_wedge_bits_lookup(bsize));
const uint8_t *mask;
uint64_t sse;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
#else
const int bd_round = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(32, int16_t, r0[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
@@ -6425,34 +6420,34 @@
int64_t sign_limit;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (hbd) {
- vpx_highbd_subtract_block(bh, bw, r0, bw, src->buf, src->stride,
+ aom_highbd_subtract_block(bh, bw, r0, bw, src->buf, src->stride,
CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
- vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
+ aom_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
- vpx_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
+ aom_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
} else // NOLINT
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
- vpx_subtract_block(bh, bw, r0, bw, src->buf, src->stride, p0, bw);
- vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
- vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
+ aom_subtract_block(bh, bw, r0, bw, src->buf, src->stride, p0, bw);
+ aom_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
+ aom_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
}
- sign_limit = ((int64_t)vpx_sum_squares_i16(r0, N) -
- (int64_t)vpx_sum_squares_i16(r1, N)) *
+ sign_limit = ((int64_t)aom_sum_squares_i16(r0, N) -
+ (int64_t)aom_sum_squares_i16(r1, N)) *
(1 << WEDGE_WEIGHT_BITS) / 2;
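// sign_limit is the mask-independent part of the sign decision:
// (sum(r0^2) - sum(r1^2)) * 2^WEDGE_WEIGHT_BITS / 2. Each candidate wedge
// then only needs the mask-weighted sum of ds[] = r0[]^2 - r1[]^2
// (computed once below) to decide which orientation predicts better.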
- vp10_wedge_compute_delta_squares(ds, r0, r1, N);
+ av1_wedge_compute_delta_squares(ds, r0, r1, N);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
- mask = vp10_get_contiguous_soft_mask(wedge_index, 0, bsize);
- wedge_sign = vp10_wedge_sign_from_residuals(ds, mask, N, sign_limit);
+ mask = av1_get_contiguous_soft_mask(wedge_index, 0, bsize);
+ wedge_sign = av1_wedge_sign_from_residuals(ds, mask, N, sign_limit);
- mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
- sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
+ mask = av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+ sse = av1_wedge_sse_from_residuals(r1, d10, mask, N);
sse = ROUND_POWER_OF_TWO(sse, bd_round);
model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
@@ -6470,7 +6465,7 @@
// Choose the best wedge index for the specified sign
static int64_t pick_wedge_fixed_sign(
- const VP10_COMP *const cpi, const MACROBLOCK *const x,
+ const AV1_COMP *const cpi, const MACROBLOCK *const x,
const BLOCK_SIZE bsize, const uint8_t *const p0, const uint8_t *const p1,
const int wedge_sign, int *const best_wedge_index) {
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -6485,32 +6480,32 @@
int wedge_types = (1 << get_wedge_bits_lookup(bsize));
const uint8_t *mask;
uint64_t sse;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
#else
const int bd_round = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int16_t, d10[MAX_SB_SQUARE]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (hbd) {
- vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
+ aom_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
- vpx_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
+ aom_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
} else // NOLINT
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
{
- vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
- vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
+ aom_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
+ aom_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
}
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
- mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
- sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
+ mask = av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+ sse = av1_wedge_sse_from_residuals(r1, d10, mask, N);
sse = ROUND_POWER_OF_TWO(sse, bd_round);
model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
@@ -6525,7 +6520,7 @@
return best_rd;
}
-static int64_t pick_interinter_wedge(const VP10_COMP *const cpi,
+static int64_t pick_interinter_wedge(const AV1_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
@@ -6552,7 +6547,7 @@
return rd;
}
-static int64_t pick_interintra_wedge(const VP10_COMP *const cpi,
+static int64_t pick_interintra_wedge(const AV1_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
@@ -6574,7 +6569,7 @@
#endif // CONFIG_EXT_INTER
static int64_t handle_inter_mode(
- VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+ AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
int *disable_skip, int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME], int mi_row,
int mi_col,
@@ -6593,7 +6588,7 @@
INTERP_FILTER (*single_filter)[TOTAL_REFS_PER_FRAME],
int (*single_skippable)[TOTAL_REFS_PER_FRAME], int64_t *psse,
const int64_t ref_best_rd) {
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
@@ -6613,14 +6608,14 @@
cpi->interintra_mode_cost[size_group_lookup[bsize]];
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
#if CONFIG_REF_MV
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
#endif
#endif // CONFIG_EXT_INTER
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_buf_[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf_[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
uint8_t *tmp_buf;
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -6685,15 +6680,15 @@
mode_ctx = mbmi_ext->compound_mode_context[refs[0]];
else
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, -1);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, -1);
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf_);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
tmp_buf = tmp_buf_;
if (is_comp_pred) {
@@ -6718,28 +6713,28 @@
single_newmv, &rate_mv, 0);
} else {
#if CONFIG_REF_MV
- vp10_set_mvcost(x, mbmi->ref_frame[0]);
+ av1_set_mvcost(x, mbmi->ref_frame[0]);
#endif // CONFIG_REF_MV
- rate_mv = vp10_mv_bit_cost(
- &frame_mv[refs[0]].as_mv, &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
#if CONFIG_REF_MV
- vp10_set_mvcost(x, mbmi->ref_frame[1]);
+ av1_set_mvcost(x, mbmi->ref_frame[1]);
#endif // CONFIG_REF_MV
- rate_mv += vp10_mv_bit_cost(
+ rate_mv += av1_mv_bit_cost(
&frame_mv[refs[1]].as_mv, &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
} else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
- rate_mv = vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ rate_mv = av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
} else {
frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
- rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
#else
// Initialize mv using single prediction mode result.
@@ -6751,17 +6746,17 @@
single_newmv, &rate_mv, 0);
} else {
#if CONFIG_REF_MV
- vp10_set_mvcost(x, mbmi->ref_frame[0]);
+ av1_set_mvcost(x, mbmi->ref_frame[0]);
#endif // CONFIG_REF_MV
- rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
#if CONFIG_REF_MV
- vp10_set_mvcost(x, mbmi->ref_frame[1]);
+ av1_set_mvcost(x, mbmi->ref_frame[1]);
#endif // CONFIG_REF_MV
- rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ rate_mv += av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
#endif // CONFIG_EXT_INTER
} else {
@@ -6790,7 +6785,7 @@
// motion field, where the distortion gain for a single block may not
// be enough to overcome the cost of a new mv.
if (discount_newmv_test(cpi, this_mode, x->best_mv, mode_mv, refs[0])) {
- rate_mv = VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
+ rate_mv = AOMMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
}
}
*rate2 += rate_mv;
@@ -6815,7 +6810,7 @@
if (this_mode == NEAREST_NEARESTMV) {
#else
if (this_mode == NEARESTMV && is_comp_pred) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
#endif // CONFIG_EXT_INTER
if (mbmi_ext->ref_mv_count[ref_frame_type] > 0) {
cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv;
@@ -6873,7 +6868,7 @@
}
#else
if (this_mode == NEARMV && is_comp_pred) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
if (mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
int ref_mv_idx = mbmi->ref_mv_idx + 1;
cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
@@ -6909,10 +6904,10 @@
if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
refs[0])) {
#if CONFIG_REF_MV && CONFIG_EXT_INTER
- *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx),
+ *rate2 += AOMMIN(cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx),
cost_mv_ref(cpi, NEARESTMV, is_comp_pred, mode_ctx));
#else
- *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
+ *rate2 += AOMMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
cost_mv_ref(cpi, NEARESTMV, mode_ctx));
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
} else {
@@ -6969,7 +6964,7 @@
#else
mbmi->interp_filter = i;
#endif
- rs = vp10_get_switchable_rate(cpi, xd);
+ rs = av1_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
if (i > 0 && intpel_mv && IsInterpolatingFilter(i)) {
@@ -7005,7 +7000,7 @@
xd->plane[j].dst.stride = MAX_SB_SIZE;
}
}
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &rate_sum,
&dist_sum, &tmp_skip_sb, &tmp_skip_sse);
@@ -7076,7 +7071,7 @@
mbmi->interp_filter =
cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
#endif
- rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
+ rs = cm->interp_filter == SWITCHABLE ? av1_get_switchable_rate(cpi, xd) : 0;
#if CONFIG_EXT_INTER
#if CONFIG_OBMC
@@ -7094,10 +7089,10 @@
int tmp_skip_txfm_sb;
int64_t tmp_skip_sse_sb;
- rs = vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+ rs = av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
mbmi->use_wedge_interinter = 0;
- vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
- vp10_subtract_plane(x, bsize, 0);
+ av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+ av1_subtract_plane(x, bsize, 0);
rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
if (rd != INT64_MAX)
@@ -7114,12 +7109,12 @@
int strides[1] = { bw };
mbmi->use_wedge_interinter = 1;
- rs = vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
- vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+ rs = av1_cost_literal(get_interinter_wedge_bits(bsize)) +
+ av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
- vp10_build_inter_predictors_for_planes_single_buf(
+ av1_build_inter_predictors_for_planes_single_buf(
xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
- vp10_build_inter_predictors_for_planes_single_buf(
+ av1_build_inter_predictors_for_planes_single_buf(
xd, bsize, 0, 0, mi_row, mi_col, 1, preds1, strides);
// Choose the best wedge
@@ -7152,7 +7147,7 @@
tmp_rate_mv = rate_mvs[1];
mbmi->mv[1].as_int = tmp_mv[1].as_int;
}
- vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
@@ -7162,10 +7157,10 @@
mbmi->mv[0].as_int = cur_mv[0].as_int;
mbmi->mv[1].as_int = cur_mv[1].as_int;
tmp_rate_mv = rate_mv;
- vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
- strides, preds1, strides);
+ av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
+ strides, preds1, strides);
}
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
rd =
estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7188,9 +7183,9 @@
xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
}
} else {
- vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
- strides, preds1, strides);
- vp10_subtract_plane(x, bsize, 0);
+ av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
+ strides, preds1, strides);
+ av1_subtract_plane(x, bsize, 0);
rd =
estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7205,19 +7200,19 @@
}
}
if (ref_best_rd < INT64_MAX &&
- VPXMIN(best_rd_wedge, best_rd_nowedge) / 3 > ref_best_rd)
+ AOMMIN(best_rd_wedge, best_rd_nowedge) / 3 > ref_best_rd)
return INT64_MAX;
pred_exists = 0;
- tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
+ tmp_rd = AOMMIN(best_rd_wedge, best_rd_nowedge);
if (mbmi->use_wedge_interinter)
*compmode_wedge_cost =
- vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
- vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+ av1_cost_literal(get_interinter_wedge_bits(bsize)) +
+ av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
else
*compmode_wedge_cost =
- vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+ av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
}
if (is_comp_interintra_pred) {
@@ -7236,11 +7231,11 @@
DECLARE_ALIGNED(16, uint8_t, intrapred_[2 * MAX_SB_SQUARE]);
uint8_t *intrapred;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
intrapred = CONVERT_TO_BYTEPTR(intrapred_);
else
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
intrapred = intrapred_;
mbmi->ref_frame[1] = NONE;
@@ -7248,7 +7243,7 @@
xd->plane[j].dst.buf = tmp_buf + j * MAX_SB_SQUARE;
xd->plane[j].dst.stride = bw;
}
- vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
restore_dst_buf(xd, orig_dst, orig_dst_stride);
mbmi->ref_frame[1] = INTRA_FRAME;
mbmi->use_wedge_interintra = 0;
@@ -7256,8 +7251,8 @@
for (j = 0; j < INTERINTRA_MODES; ++j) {
mbmi->interintra_mode = (INTERINTRA_MODE)j;
rmode = interintra_mode_cost[mbmi->interintra_mode];
- vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
- vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+ av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
+ av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
@@ -7268,9 +7263,9 @@
}
mbmi->interintra_mode = best_interintra_mode;
rmode = interintra_mode_cost[mbmi->interintra_mode];
- vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
- vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
- vp10_subtract_plane(x, bsize, 0);
+ av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
+ av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+ av1_subtract_plane(x, bsize, 0);
rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
if (rd != INT64_MAX)
@@ -7281,7 +7276,7 @@
return INT64_MAX;
}
if (is_interintra_wedge_used(bsize)) {
- rwedge = vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 0);
+ rwedge = av1_cost_bit(cm->fc->wedge_interintra_prob[bsize], 0);
if (rd != INT64_MAX)
rd = RDCOST(x->rdmult, x->rddiv, rmode + rate_mv + rwedge + rate_sum,
dist_sum);
@@ -7291,8 +7286,8 @@
if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
mbmi->use_wedge_interintra = 1;
- rwedge = vp10_cost_literal(get_interintra_wedge_bits(bsize)) +
- vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
+ rwedge = av1_cost_literal(get_interintra_wedge_bits(bsize)) +
+ av1_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
best_interintra_rd_wedge =
pick_interintra_wedge(cpi, x, bsize, intrapred_, tmp_buf_);
@@ -7302,12 +7297,12 @@
// Refine motion vector.
if (have_newmv_in_inter_mode(this_mode)) {
// get negative of mask
- const uint8_t *mask = vp10_get_contiguous_soft_mask(
+ const uint8_t *mask = av1_get_contiguous_soft_mask(
mbmi->interintra_wedge_index, 1, bsize);
do_masked_motion_search(cpi, x, mask, bw, bsize, mi_row, mi_col,
&tmp_mv, &tmp_rate_mv, 0, mv_idx);
mbmi->mv[0].as_int = tmp_mv.as_int;
- vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv,
@@ -7321,10 +7316,10 @@
} else {
tmp_mv.as_int = cur_mv[0].as_int;
tmp_rate_mv = rate_mv;
- vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+ av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
}
// Evaluate closer to true rd
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
rd =
estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7352,23 +7347,23 @@
pred_exists = 0;
tmp_rd = best_interintra_rd;
*compmode_interintra_cost =
- vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 1);
+ av1_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 1);
*compmode_interintra_cost += interintra_mode_cost[mbmi->interintra_mode];
if (is_interintra_wedge_used(bsize)) {
- *compmode_interintra_cost += vp10_cost_bit(
+ *compmode_interintra_cost += av1_cost_bit(
cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
if (mbmi->use_wedge_interintra) {
*compmode_interintra_cost +=
- vp10_cost_literal(get_interintra_wedge_bits(bsize));
+ av1_cost_literal(get_interintra_wedge_bits(bsize));
}
}
} else if (is_interintra_allowed(mbmi)) {
*compmode_interintra_cost =
- vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
+ av1_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
}
#if CONFIG_EXT_INTERP
- if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
+ if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
#if CONFIG_DUAL_FILTER
for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = EIGHTTAP_REGULAR;
#else
@@ -7395,7 +7390,7 @@
// Handles the special case when a filter that is not in the
// switchable list (e.g. bilinear) is indicated at the frame level, or
// the skip condition holds.
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &tmp_rate,
&tmp_dist, &skip_txfm_sb, &skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
@@ -7413,7 +7408,7 @@
const int mode0 = compound_ref0_mode(this_mode);
const int mode1 = compound_ref1_mode(this_mode);
int64_t mrd =
- VPXMIN(modelled_rd[mode0][refs[0]], modelled_rd[mode1][refs[1]]);
+ AOMMIN(modelled_rd[mode0][refs[0]], modelled_rd[mode1][refs[1]]);
if (rd / 4 * 3 > mrd && ref_best_rd < INT64_MAX) {
restore_dst_buf(xd, orig_dst, orig_dst_stride);
return INT64_MAX;
@@ -7486,7 +7481,7 @@
&tmp_mv, pred_mv, &tmp_rate_mv);
mbmi->mv[0].as_int = tmp_mv.as_int;
if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
- tmp_rate_mv = VPXMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
+ tmp_rate_mv = AOMMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
}
#if CONFIG_EXT_INTER
tmp_rate2 = rate2_bmc_nocoeff - rate_mv_bmc + tmp_rate_mv;
@@ -7500,21 +7495,21 @@
if (!has_subpel_mv_component(xd->mi[0], xd, 1))
obmc_interp_filter[1][1] = mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
#else
- if (!vp10_is_interp_needed(xd))
+ if (!av1_is_interp_needed(xd))
obmc_interp_filter[1] = mbmi->interp_filter = EIGHTTAP_REGULAR;
#endif // CONFIG_DUAL_FILTER
// This is not quite correct with CONFIG_DUAL_FILTER when a filter
// is needed in only one direction
- if (!vp10_is_interp_needed(xd)) tmp_rate2 -= rs;
+ if (!av1_is_interp_needed(xd)) tmp_rate2 -= rs;
#endif // CONFIG_EXT_INTERP
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
#if CONFIG_EXT_INTER
} else {
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
#endif // CONFIG_EXT_INTER
}
- vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
- dst_stride1, dst_buf2, dst_stride2);
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1, dst_buf2, dst_stride2);
model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &tmp_rate,
&tmp_dist, &skip_txfm_sb, &skip_sse_sb);
}
@@ -7537,7 +7532,7 @@
int64_t rdcosty = INT64_MAX;
// Y cost and distortion
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
#if CONFIG_VAR_TX
if (cm->tx_mode == TX_MODE_SELECT || xd->lossless[mbmi->segment_id]) {
select_tx_type_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
@@ -7576,7 +7571,7 @@
*distortion += distortion_y;
rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
- rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
+ rdcosty = AOMMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
#if CONFIG_VAR_TX
if (!inter_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
@@ -7605,25 +7600,24 @@
*rate2 -= *rate_uv + *rate_y;
*rate_y = 0;
*rate_uv = 0;
- *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
mbmi->skip = 0;
// here mbmi->skip temporarily plays the role that this_skip2 does
} else if (!xd->lossless[mbmi->segment_id] &&
(RDCOST(x->rdmult, x->rddiv,
*rate_y + *rate_uv +
- vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+ av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
*distortion) >=
RDCOST(x->rdmult, x->rddiv,
- vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1),
- *psse))) {
+ av1_cost_bit(av1_get_skip_prob(cm, xd), 1), *psse))) {
*rate2 -= *rate_uv + *rate_y;
- *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
*distortion = *psse;
*rate_y = 0;
*rate_uv = 0;
mbmi->skip = 1;
} else {
- *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
mbmi->skip = 0;
}
*disable_skip = 0;
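The three-way choice above weighs coding the residual against dropping it. A simplified sketch of the middle branch; rdcost() is a stand-in with made-up weights (the real code uses the RDCOST macro with x->rdmult and x->rddiv), and the skip-flag bit costs are invented:
#include <stdint.h>
#include <stdio.h>
/* Stand-in for RDCOST: trade rate (1/512-bit units) against distortion.
 * The weighting constants are illustrative only. */
static int64_t rdcost(int64_t rate, int64_t dist) {
  return rate * 300 / 256 + (dist << 4);
}
int main(void) {
  const int64_t rate_yuv = 4000, dist = 900, psse = 2500;
  const int64_t skip0 = 512, skip1 = 480; /* assumed skip-flag bit costs */
  /* Code the residual, or drop it and accept the prediction error? */
  if (rdcost(rate_yuv + skip0, dist) >= rdcost(skip1, psse))
    printf("skip=1: drop coefficients, distortion becomes psse\n");
  else
    printf("skip=0: code the residual\n");
  return 0;
}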
@@ -7636,7 +7630,7 @@
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
mbmi->skip = 0;
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
- *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
*distortion = skip_sse_sb;
*psse = skip_sse_sb;
@@ -7670,18 +7664,18 @@
best_skippable = *skippable;
best_xskip = x->skip;
best_disable_skip = *disable_skip;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->recon_variance = vp10_high_get_sby_perpixel_variance(
+ x->recon_variance = av1_high_get_sby_perpixel_variance(
cpi, &xd->plane[0].dst, bsize, xd->bd);
} else {
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -7709,28 +7703,28 @@
if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
#if !(CONFIG_OBMC || CONFIG_WARPED_MOTION)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->recon_variance = vp10_high_get_sby_perpixel_variance(
+ x->recon_variance = av1_high_get_sby_perpixel_variance(
cpi, &xd->plane[0].dst, bsize, xd->bd);
} else {
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // !(CONFIG_OBMC || CONFIG_WARPED_MOTION)
restore_dst_buf(xd, orig_dst, orig_dst_stride);
return 0; // The rate-distortion cost will be re-calculated by caller.
}
-void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rd_pick_intra_mode_sb(AV1_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblockd_plane *const pd = xd->plane;
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
@@ -7758,15 +7752,15 @@
max_uv_tx_size = get_uv_tx_size_impl(
xd->mi[0]->mbmi.tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly, &dist_uv,
- &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
+ &uv_skip, AOMMAX(BLOCK_8X8, bsize), max_uv_tx_size);
if (y_skip && uv_skip) {
rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
- vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
rd_cost->dist = dist_y + dist_uv;
} else {
rd_cost->rate =
- rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate_y + rate_uv + av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
rd_cost->dist = dist_y + dist_uv;
}
@@ -7806,18 +7800,18 @@
// to a predictor with a low spatial complexity compared to the source.
if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
(source_variance > recon_variance)) {
- var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
+ var_factor = AOMMIN(absvar_diff, AOMMIN(VLOW_ADJ_MAX, var_error));
// A second possible case of interest is where the source variance
// is very low and we wish to discourage false texture or motion trails.
} else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
(recon_variance > source_variance)) {
- var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
+ var_factor = AOMMIN(absvar_diff, AOMMIN(VHIGH_ADJ_MAX, var_error));
}
*this_rd += (*this_rd * var_factor) / 100;
}
// Do we have an internal image edge (e.g. formatting bars)?
-int vp10_internal_image_edge(VP10_COMP *cpi) {
+int av1_internal_image_edge(AV1_COMP *cpi) {
return (cpi->oxcf.pass == 2) &&
((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
(cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
@@ -7826,7 +7820,7 @@
// Checks to see if a super block is on a horizontal image edge.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
+int av1_active_h_edge(AV1_COMP *cpi, int mi_row, int mi_step) {
int top_edge = 0;
int bottom_edge = cpi->common.mi_rows;
int is_active_h_edge = 0;
@@ -7840,7 +7834,7 @@
top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
- bottom_edge = VPXMAX(top_edge, bottom_edge);
+ bottom_edge = AOMMAX(top_edge, bottom_edge);
}
if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
@@ -7853,7 +7847,7 @@
// Checks to see if a super block is on a vertical image edge.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
+int av1_active_v_edge(AV1_COMP *cpi, int mi_col, int mi_step) {
int left_edge = 0;
int right_edge = cpi->common.mi_cols;
int is_active_v_edge = 0;
@@ -7867,7 +7861,7 @@
left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
- right_edge = VPXMAX(left_edge, right_edge);
+ right_edge = AOMMAX(left_edge, right_edge);
}
if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
@@ -7880,12 +7874,12 @@
// Checks to see if a super block is at the edge of the active image.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
- return vp10_active_h_edge(cpi, mi_row, cpi->common.mib_size) ||
- vp10_active_v_edge(cpi, mi_col, cpi->common.mib_size);
+int av1_active_edge_sb(AV1_COMP *cpi, int mi_row, int mi_col) {
+ return av1_active_h_edge(cpi, mi_row, cpi->common.mib_size) ||
+ av1_active_v_edge(cpi, mi_col, cpi->common.mib_size);
}
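The h/v helpers above shrink the active region by twice the reported inactive zone, plausibly because first-pass stats are counted in 16-pel macroblock rows while mi units are 8 pels. A toy version of the overlap test (names and numbers invented):
#include <stdio.h>
/* A superblock is "on" the edge if either boundary falls in its span. */
static int on_edge(int edge_lo, int edge_hi, int mi_pos, int mi_step) {
  return (edge_lo >= mi_pos && edge_lo < mi_pos + mi_step) ||
         (edge_hi >= mi_pos && edge_hi < mi_pos + mi_step);
}
int main(void) {
  /* 4 inactive zone rows -> active region shrinks by 2 * 4 = 8 mi units. */
  const int top = 0 + 8, bottom = 135 - 8;
  printf("sb at mi_row 8:  %d\n", on_edge(top, bottom, 8, 16));  /* 1 */
  printf("sb at mi_row 64: %d\n", on_edge(top, bottom, 64, 16)); /* 0 */
  return 0;
}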
-static void restore_uv_color_map(VP10_COMP *cpi, MACROBLOCK *x) {
+static void restore_uv_color_map(AV1_COMP *cpi, MACROBLOCK *x) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
@@ -7901,25 +7895,25 @@
float centroids[2 * PALETTE_MAX_SIZE];
uint8_t *const color_map = xd->plane[1].color_index_map;
int r, c;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint16_t *const src_u16 = CONVERT_TO_SHORTPTR(src_u);
const uint16_t *const src_v16 = CONVERT_TO_SHORTPTR(src_v);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
(void)cpi;
for (r = 0; r < rows; ++r) {
for (c = 0; c < cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cpi->common.use_highbitdepth) {
data[(r * cols + c) * 2] = src_u16[r * src_stride + c];
data[(r * cols + c) * 2 + 1] = src_v16[r * src_stride + c];
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
data[(r * cols + c) * 2] = src_u[r * src_stride + c];
data[(r * cols + c) * 2 + 1] = src_v[r * src_stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -7929,13 +7923,13 @@
}
}
- vp10_calc_indices(data, centroids, color_map, rows * cols,
- pmi->palette_size[1], 2);
+ av1_calc_indices(data, centroids, color_map, rows * cols,
+ pmi->palette_size[1], 2);
}
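av1_calc_indices, as used above, maps each 2-D (U, V) sample to its nearest palette centroid. A self-contained sketch of that assignment under a squared-Euclidean metric (an assumption; the helper name below is illustrative, not the library's):
#include <stdint.h>
#include <stdio.h>
/* Assign each (U, V) pair the index of its nearest of k centroids. */
static void nearest_centroid_2d(const float *data, const float *centroids,
                                uint8_t *map, int n, int k) {
  for (int i = 0; i < n; ++i) {
    int best = 0;
    float best_d = 1e30f;
    for (int j = 0; j < k; ++j) {
      const float du = data[2 * i] - centroids[2 * j];
      const float dv = data[2 * i + 1] - centroids[2 * j + 1];
      if (du * du + dv * dv < best_d) {
        best_d = du * du + dv * dv;
        best = j;
      }
    }
    map[i] = (uint8_t)best;
  }
}
int main(void) {
  const float data[] = { 100, 100, 140, 135 };     /* two (U, V) samples */
  const float centroids[] = { 98, 102, 141, 134 }; /* two palette entries */
  uint8_t map[2];
  nearest_centroid_2d(data, centroids, map, 2, 2);
  printf("%u %u\n", map[0], map[1]); /* 0 1 */
  return 0;
}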
#if CONFIG_EXT_INTRA
static void pick_ext_intra_interframe(
- VP10_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+ AV1_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
int *rate_uv_intra, int *rate_uv_tokenonly, int64_t *dist_uv, int *skip_uv,
PREDICTION_MODE *mode_uv, EXT_INTRA_MODE_INFO *ext_intra_mode_info_uv,
PALETTE_MODE_INFO *pmi_uv, int8_t *uv_angle_delta, int palette_ctx,
@@ -7946,7 +7940,7 @@
int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
int64_t *best_pred_rd, MB_MODE_INFO *best_mbmode, RD_COST *rd_cost) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
@@ -7958,8 +7952,8 @@
TX_SIZE uv_tx;
for (i = 0; i < MAX_MODES; ++i)
- if (vp10_mode_order[i].mode == DC_PRED &&
- vp10_mode_order[i].ref_frame[0] == INTRA_FRAME)
+ if (av1_mode_order[i].mode == DC_PRED &&
+ av1_mode_order[i].ref_frame[0] == INTRA_FRAME)
break;
dc_mode_index = i;
assert(i < MAX_MODES);
@@ -8008,8 +8002,8 @@
rate2 = rate_y + intra_mode_cost[mbmi->mode] + rate_uv +
cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode];
if (cpi->common.allow_screen_content_tools && mbmi->mode == DC_PRED)
- rate2 += vp10_cost_bit(
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
+ rate2 += av1_cost_bit(
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
if (!xd->lossless[mbmi->segment_id]) {
// super_block_yrd above includes the cost of the tx_size in the
@@ -8020,8 +8014,8 @@
TX_8X8][get_tx_size_context(xd)][mbmi->tx_size];
}
- rate2 += vp10_cost_bit(cm->fc->ext_intra_probs[0],
- mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
+ rate2 += av1_cost_bit(cm->fc->ext_intra_probs[0],
+ mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
rate2 += write_uniform_cost(FILTER_INTRA_MODES,
mbmi->ext_intra_mode_info.ext_intra_mode[0]);
if (mbmi->uv_mode != DC_PRED && mbmi->uv_mode != TM_PRED) {
@@ -8029,26 +8023,26 @@
MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
}
if (mbmi->mode == DC_PRED) {
- rate2 += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1],
- mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
+ rate2 += av1_cost_bit(cpi->common.fc->ext_intra_probs[1],
+ mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1])
rate2 += write_uniform_cost(FILTER_INTRA_MODES,
mbmi->ext_intra_mode_info.ext_intra_mode[1]);
}
distortion2 = distortion_y + distortion_uv;
- vp10_encode_intra_block_plane(x, bsize, 0, 0);
-#if CONFIG_VP9_HIGHBITDEPTH
+ av1_encode_intra_block_plane(x, bsize, 0, 0);
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->recon_variance = vp10_high_get_sby_perpixel_variance(
+ x->recon_variance = av1_high_get_sby_perpixel_variance(
cpi, &xd->plane[0].dst, bsize, xd->bd);
} else {
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
rate2 += ref_costs_single[INTRA_FRAME];
@@ -8056,9 +8050,9 @@
rate2 -= (rate_y + rate_uv);
rate_y = 0;
rate_uv = 0;
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
} else {
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
rd_variance_adjustment(x, &this_rd, INTRA_FRAME, x->source_variance);
@@ -8068,7 +8062,7 @@
*best_intra_mode = mbmi->mode;
}
for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+ best_pred_rd[i] = AOMMIN(best_pred_rd[i], this_rd);
if (this_rd < *best_rd) {
*best_mode_index = dc_mode_index;
@@ -8079,9 +8073,9 @@
*returnrate_nocoef = rate2;
else
*returnrate_nocoef = rate2 - rate_y - rate_uv;
- *returnrate_nocoef -= vp10_cost_bit(vp10_get_skip_prob(cm, xd), skippable);
- *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
- mbmi->ref_frame[0] != INTRA_FRAME);
+ *returnrate_nocoef -= av1_cost_bit(av1_get_skip_prob(cm, xd), skippable);
+ *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd),
+ mbmi->ref_frame[0] != INTRA_FRAME);
#endif // CONFIG_SUPERTX
rd_cost->dist = distortion2;
rd_cost->rdcost = this_rd;
@@ -8094,21 +8088,23 @@
#endif // CONFIG_EXT_INTRA
#if CONFIG_OBMC
-static void calc_target_weighted_pred(
- const VP10_COMMON *cm, const MACROBLOCK *x, const MACROBLOCKD *xd,
- int mi_row, int mi_col, const uint8_t *above, int above_stride,
- const uint8_t *left, int left_stride, int32_t *mask_buf, int32_t *wsrc_buf);
+static void calc_target_weighted_pred(const AV1_COMMON *cm, const MACROBLOCK *x,
+ const MACROBLOCKD *xd, int mi_row,
+ int mi_col, const uint8_t *above,
+ int above_stride, const uint8_t *left,
+ int left_stride, int32_t *mask_buf,
+ int32_t *wsrc_buf);
#endif // CONFIG_OBMC
-void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
- MACROBLOCK *x, int mi_row, int mi_col,
- RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, int mi_row, int mi_col,
+ RD_COST *rd_cost,
#if CONFIG_SUPERTX
- int *returnrate_nocoef,
+ int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far) {
- VP10_COMMON *const cm = &cpi->common;
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far) {
+ AV1_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -8133,16 +8129,16 @@
int single_skippable[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
static const int flag_list[TOTAL_REFS_PER_FRAME] = {
0,
- VPX_LAST_FLAG,
+ AOM_LAST_FLAG,
#if CONFIG_EXT_REFS
- VPX_LAST2_FLAG,
- VPX_LAST3_FLAG,
+ AOM_LAST2_FLAG,
+ AOM_LAST3_FLAG,
#endif // CONFIG_EXT_REFS
- VPX_GOLD_FLAG,
+ AOM_GOLD_FLAG,
#if CONFIG_EXT_REFS
- VPX_BWD_FLAG,
+ AOM_BWD_FLAG,
#endif // CONFIG_EXT_REFS
- VPX_ALT_FLAG
+ AOM_ALT_FLAG
};
int64_t best_rd = best_rd_so_far;
int best_rate_y = INT_MAX, best_rate_uv = INT_MAX;
@@ -8153,7 +8149,7 @@
int midx, best_mode_index = -1;
unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
- vpx_prob comp_mode_p;
+ aom_prob comp_mode_p;
int64_t best_intra_rd = INT64_MAX;
unsigned int best_pred_sse = UINT_MAX;
PREDICTION_MODE best_intra_mode = DC_PRED;
@@ -8169,7 +8165,7 @@
int rate_overhead, rate_dummy;
uint8_t directional_mode_skip_mask[INTRA_MODES];
#endif // CONFIG_EXT_INTRA
- const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+ const int intra_cost_penalty = av1_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
const int *const intra_mode_cost = cpi->mbmode_cost[size_group_lookup[bsize]];
int best_skip2 = 0;
@@ -8194,13 +8190,13 @@
const MODE_INFO *above_mi = xd->above_mi;
const MODE_INFO *left_mi = xd->left_mi;
#if CONFIG_OBMC
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, int32_t, weighted_src_buf[MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, int32_t, mask2d_buf[MAX_SB_SQUARE]);
uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
@@ -8211,7 +8207,7 @@
int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -8221,20 +8217,20 @@
dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_SB_SQUARE * len);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst_buf1[0] = tmp_buf1;
dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
dst_buf1[2] = tmp_buf1 + 2 * MAX_SB_SQUARE;
dst_buf2[0] = tmp_buf2;
dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
dst_buf2[2] = tmp_buf2 + 2 * MAX_SB_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_OBMC
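// The CONVERT_TO_BYTEPTR/CONVERT_TO_SHORTPTR pairing used here is the
// codebase's convention for routing 16-bit sample buffers through uint8_t*
// interfaces: high-bit-depth paths allocate twice the bytes (hence the
// 2 * MAX_MB_PLANE * MAX_SB_SQUARE buffers above) and translate the pointer
// at the boundary instead of changing every signature. A sketch of the
// macros, assuming the usual aom_ports/mem.h shift-tagging form:
//
//   #define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
//   #define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))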
- vp10_zero(best_mbmode);
- vp10_zero(pmi_uv);
+ av1_zero(best_mbmode);
+ av1_zero(pmi_uv);
if (cm->allow_screen_content_tools) {
if (above_mi)
@@ -8291,22 +8287,22 @@
MODE_INFO *const mi = xd->mi[0];
int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
x->mbmi_ext->mode_context[ref_frame] = 0;
- vp10_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
- mbmi_ext->ref_mv_stack[ref_frame],
+ av1_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
+ mbmi_ext->ref_mv_stack[ref_frame],
#if CONFIG_EXT_INTER
- mbmi_ext->compound_mode_context,
+ mbmi_ext->compound_mode_context,
#endif // CONFIG_EXT_INTER
- candidates, mi_row, mi_col, NULL, NULL,
- mbmi_ext->mode_context);
+ candidates, mi_row, mi_col, NULL, NULL,
+ mbmi_ext->mode_context);
}
#endif // CONFIG_REF_MV
#if CONFIG_OBMC
- vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
- dst_width1, dst_height1, dst_stride1);
- vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
- dst_width2, dst_height2, dst_stride2);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_width1, dst_height1, dst_stride1);
+ av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+ dst_width2, dst_height2, dst_stride2);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
calc_target_weighted_pred(cm, x, xd, mi_row, mi_col, dst_buf1[0],
dst_stride1[0], dst_buf2[0], dst_stride2[0],
mask2d_buf, weighted_src_buf);
@@ -8466,9 +8462,9 @@
#endif
mode_index = mode_map[midx];
- this_mode = vp10_mode_order[mode_index].mode;
- ref_frame = vp10_mode_order[mode_index].ref_frame[0];
- second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
+ this_mode = av1_mode_order[mode_index].mode;
+ ref_frame = av1_mode_order[mode_index].ref_frame[0];
+ second_ref_frame = av1_mode_order[mode_index].ref_frame[1];
#if CONFIG_EXT_INTER
if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME) {
@@ -8528,7 +8524,7 @@
}
if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
- (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
+ (ref_frame_skip_mask[1] & (1 << AOMMAX(0, second_ref_frame))))
continue;
if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
@@ -8643,7 +8639,7 @@
const uint8_t *src = x->plane[0].src.buf;
const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
const int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
highbd_angle_estimation(src, src_stride, rows, cols,
directional_mode_skip_mask);
@@ -8712,9 +8708,8 @@
rate2 = rate_y + intra_mode_cost[mbmi->mode] + rate_uv +
cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode];
if (cpi->common.allow_screen_content_tools && mbmi->mode == DC_PRED)
- rate2 += vp10_cost_bit(
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
- 0);
+ rate2 += av1_cost_bit(
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
if (!xd->lossless[mbmi->segment_id]) {
// super_block_yrd above includes the cost of the tx_size in the
@@ -8727,18 +8722,18 @@
#if CONFIG_EXT_INTRA
if (is_directional_mode) {
int p_angle;
- const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+ const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
rate2 += write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
p_angle =
mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle))
+ if (av1_is_intra_filter_switchable(p_angle))
rate2 += cpi->intra_filter_cost[intra_filter_ctx][mbmi->intra_filter];
}
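// Angle deltas are signaled with a near-uniform code over
// 2 * MAX_ANGLE_DELTAS + 1 symbols. A sketch of the costing, assuming the
// usual truncated binary code where the first m = 2^l - n symbols take
// l - 1 bits and the rest take l bits (512 being the cost of one raw bit
// in av1_cost_bit units):
//
//   static int write_uniform_cost_sketch(int n, int v) {
//     int l = 1, m;
//     if (n <= 1) return 0;
//     while ((1 << l) < n) ++l;  // l = ceil(log2(n))
//     m = (1 << l) - n;
//     return (v < m ? l - 1 : l) * 512;
//   }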
if (mbmi->mode == DC_PRED && ALLOW_FILTER_INTRA_MODES) {
- rate2 += vp10_cost_bit(cm->fc->ext_intra_probs[0],
- mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
+ rate2 += av1_cost_bit(cm->fc->ext_intra_probs[0],
+ mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
rate2 += write_uniform_cost(
FILTER_INTRA_MODES, mbmi->ext_intra_mode_info.ext_intra_mode[0]);
@@ -8751,8 +8746,8 @@
}
if (ALLOW_FILTER_INTRA_MODES && mbmi->mode == DC_PRED) {
- rate2 += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1],
- mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
+ rate2 += av1_cost_bit(cpi->common.fc->ext_intra_probs[1],
+ mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1])
rate2 += write_uniform_cost(
FILTER_INTRA_MODES, mbmi->ext_intra_mode_info.ext_intra_mode[1]);
@@ -8761,19 +8756,19 @@
if (this_mode != DC_PRED && this_mode != TM_PRED)
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
- vp10_encode_intra_block_plane(x, bsize, 0, 1);
-#if CONFIG_VP9_HIGHBITDEPTH
+ av1_encode_intra_block_plane(x, bsize, 0, 1);
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->recon_variance = vp10_high_get_sby_perpixel_variance(
+ x->recon_variance = av1_high_get_sby_perpixel_variance(
cpi, &xd->plane[0].dst, bsize, xd->bd);
} else {
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
x->recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
} else {
#if CONFIG_REF_MV
int_mv backup_ref_mv[2];
@@ -8798,7 +8793,7 @@
#endif // CONFIG_EXT_INTER
#if CONFIG_REF_MV
mbmi->ref_mv_idx = 0;
- ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
if (this_mode == NEWMV && mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
int ref;
@@ -8841,10 +8836,10 @@
// TODO(jingning): This should be deprecated shortly.
int idx_offset = (mbmi->mode == NEARMV) ? 1 : 0;
int ref_set =
- VPXMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
+ AOMMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx_offset);
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx_offset);
// Dummy
int_mv backup_fmv[2];
backup_fmv[0] = frame_mv[NEWMV][ref_frame];
@@ -8857,12 +8852,12 @@
RDCOST(x->rdmult, x->rddiv, 0, total_sse))
tmp_ref_rd =
RDCOST(x->rdmult, x->rddiv,
- rate2 + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+ rate2 + av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
distortion2);
else
tmp_ref_rd =
RDCOST(x->rdmult, x->rddiv,
- rate2 + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1) -
+ rate2 + av1_cost_bit(av1_get_skip_prob(cm, xd), 1) -
rate_y - rate_uv,
total_sse);
}
@@ -8940,8 +8935,8 @@
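// This loop prices the dynamic reference MV (DRL) index: every candidate
// below the chosen ref_mv_idx is signaled with a one-bit "keep going" flag
// costed under a context derived from the reference MV stack, so deeper
// indices get progressively more expensive.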
for (i = 0; i < mbmi->ref_mv_idx; ++i) {
uint8_t drl1_ctx = 0;
- drl1_ctx = vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
- i + idx_offset);
+ drl1_ctx = av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
+ i + idx_offset);
tmp_rate += cpi->drl_mode_cost0[drl1_ctx][1];
}
@@ -8949,8 +8944,8 @@
mbmi->ref_mv_idx + idx_offset + 1 &&
ref_idx < ref_set - 1) {
uint8_t drl1_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
- mbmi->ref_mv_idx + idx_offset);
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
+ mbmi->ref_mv_idx + idx_offset);
tmp_rate += cpi->drl_mode_cost0[drl1_ctx][0];
}
@@ -8960,16 +8955,16 @@
#else
if (RDCOST(x->rdmult, x->rddiv, tmp_rate_y + tmp_rate_uv,
tmp_dist) < RDCOST(x->rdmult, x->rddiv, 0, tmp_sse))
- tmp_alt_rd = RDCOST(
- x->rdmult, x->rddiv,
- tmp_rate + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
- tmp_dist);
+ tmp_alt_rd =
+ RDCOST(x->rdmult, x->rddiv,
+ tmp_rate + av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
+ tmp_dist);
else
- tmp_alt_rd = RDCOST(
- x->rdmult, x->rddiv,
- tmp_rate + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1) -
- tmp_rate_y - tmp_rate_uv,
- tmp_sse);
+ tmp_alt_rd =
+ RDCOST(x->rdmult, x->rddiv,
+ tmp_rate + av1_cost_bit(av1_get_skip_prob(cm, xd), 1) -
+ tmp_rate_y - tmp_rate_uv,
+ tmp_sse);
#endif // CONFIG_OBMC
}
@@ -9011,7 +9006,7 @@
if (this_rd == INT64_MAX) continue;
- compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+ compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
}
@@ -9047,15 +9042,15 @@
rate_y = 0;
rate_uv = 0;
// Cost the skip mb case
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
} else if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) {
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
} else {
// FIXME(rbultje) make this work for splitmv also
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
distortion2 = total_sse;
assert(total_sse >= 0);
rate2 -= (rate_y + rate_uv);
@@ -9065,7 +9060,7 @@
}
} else {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
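// The skip decision just made and the final estimate below both funnel
// through RDCOST, which trades rate against distortion with the Lagrangian
// multiplier rdmult. A sketch of the macro, assuming the VP9-era definition
// this encoder inherits in its rd.h (rate comes from av1_cost_bit in
// 1/512-bit units):
//
//   #define RDCOST(RM, DM, R, D) \
//     (((128 + ((int64_t)(R)) * (RM)) >> 8) + ((D) << (DM)))
//
// Smaller is better: a mode wins when the distortion it removes outweighs
// its rate surcharge weighted by rdmult.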
// Calculate the final RD estimate for this mode.
@@ -9102,7 +9097,7 @@
if (!disable_skip && ref_frame == INTRA_FRAME) {
for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+ best_pred_rd[i] = AOMMIN(best_pred_rd[i], this_rd);
}
// Did this mode help, i.e., is it the new best mode?
@@ -9124,11 +9119,10 @@
*returnrate_nocoef = rate2;
else
*returnrate_nocoef = rate2 - rate_y - rate_uv;
- *returnrate_nocoef -=
- vp10_cost_bit(vp10_get_skip_prob(cm, xd),
- disable_skip || skippable || this_skip2);
- *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
- mbmi->ref_frame[0] != INTRA_FRAME);
+ *returnrate_nocoef -= av1_cost_bit(
+ av1_get_skip_prob(cm, xd), disable_skip || skippable || this_skip2);
+ *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd),
+ mbmi->ref_frame[0] != INTRA_FRAME);
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
if (is_inter_block(mbmi) && is_motvar_allowed(mbmi))
*returnrate_nocoef -= cpi->motvar_cost[bsize][mbmi->motion_variation];
@@ -9140,8 +9134,8 @@
best_mbmode = *mbmi;
best_skip2 = this_skip2;
best_mode_skippable = skippable;
- best_rate_y = rate_y + vp10_cost_bit(vp10_get_skip_prob(cm, xd),
- this_skip2 || skippable);
+ best_rate_y = rate_y + av1_cost_bit(av1_get_skip_prob(cm, xd),
+ this_skip2 || skippable);
best_rate_uv = rate_uv;
#if CONFIG_VAR_TX
@@ -9157,11 +9151,11 @@
int qstep = xd->plane[0].dequant[1];
// TODO(debargha): Enhance this by specializing for each mode_index
int scale = 4;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
qstep >>= (xd->bd - 8);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (x->source_variance < UINT_MAX) {
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
@@ -9229,13 +9223,13 @@
}
if (is_inter_mode(mbmi->mode)) {
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
#if CONFIG_OBMC
if (mbmi->motion_variation == OBMC_CAUSAL)
- vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
- dst_stride1, dst_buf2, dst_stride2);
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1, dst_buf2, dst_stride2);
#endif // CONFIG_OBMC
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
#if CONFIG_VAR_TX
if (cm->tx_mode == TX_MODE_SELECT || xd->lossless[mbmi->segment_id]) {
select_tx_type_yrd(cpi, x, &rate_y, &dist_y, &skip_y, &sse_y, bsize,
@@ -9269,13 +9263,13 @@
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, (dist_y + dist_uv)) >
RDCOST(x->rdmult, x->rddiv, 0, (sse_y + sse_uv))) {
skip_blk = 1;
- rate_y = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate_y = av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
rate_uv = 0;
dist_y = sse_y;
dist_uv = sse_uv;
} else {
skip_blk = 0;
- rate_y += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate_y += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
if (RDCOST(x->rdmult, x->rddiv, best_rate_y + best_rate_uv, rd_cost->dist) >
@@ -9377,12 +9371,12 @@
#if CONFIG_SUPERTX
best_rate_nocoef = rate2;
#endif
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
} else {
#if CONFIG_SUPERTX
best_rate_nocoef = rate2 - (rate_y + rate_uv_tokenonly[uv_tx]);
#endif
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
if (this_rd < best_rd) {
@@ -9433,11 +9427,11 @@
best_mbmode.ref_frame[1] };
int comp_pred_mode = refs[1] > INTRA_FRAME;
#if CONFIG_REF_MV
- const uint8_t rf_type = vp10_ref_frame_type(best_mbmode.ref_frame);
+ const uint8_t rf_type = av1_ref_frame_type(best_mbmode.ref_frame);
if (!comp_pred_mode) {
int i;
int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
- ? VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
+ ? AOMMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
: INT_MAX;
for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
@@ -9467,7 +9461,7 @@
#else
int i;
int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
- ? VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
+ ? AOMMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
: INT_MAX;
for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
@@ -9621,8 +9615,8 @@
#endif
if (!cpi->rc.is_src_frame_alt_ref)
- vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
- sf->adaptive_rd_thresh, bsize, best_mode_index);
+ av1_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+ sf->adaptive_rd_thresh, bsize, best_mode_index);
// macroblock modes
*mbmi = best_mbmode;
@@ -9656,12 +9650,12 @@
}
}
-void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
- MACROBLOCK *x, RD_COST *rd_cost,
- BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rd_pick_inter_mode_sb_seg_skip(AV1_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, RD_COST *rd_cost,
+ BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
unsigned char segment_id = mbmi->segment_id;
@@ -9670,7 +9664,7 @@
int64_t best_pred_diff[REFERENCE_MODES];
unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
- vpx_prob comp_mode_p;
+ aom_prob comp_mode_p;
INTERP_FILTER best_filter = SWITCHABLE;
int64_t this_rd = INT64_MAX;
int rate2 = 0;
@@ -9709,7 +9703,7 @@
best_filter = EIGHTTAP_REGULAR;
if (cm->interp_filter == SWITCHABLE &&
#if CONFIG_EXT_INTERP
- vp10_is_interp_needed(xd) &&
+ av1_is_interp_needed(xd) &&
#endif // CONFIG_EXT_INTERP
x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
int rs;
@@ -9721,7 +9715,7 @@
#else
mbmi->interp_filter = i;
#endif
- rs = vp10_get_switchable_rate(cpi, xd);
+ rs = av1_get_switchable_rate(cpi, xd);
if (rs < best_rs) {
best_rs = rs;
#if CONFIG_DUAL_FILTER
@@ -9740,7 +9734,7 @@
#else
mbmi->interp_filter = best_filter;
#endif
- rate2 += vp10_get_switchable_rate(cpi, xd);
+ rate2 += av1_get_switchable_rate(cpi, xd);
} else {
#if CONFIG_DUAL_FILTER
for (i = 0; i < 4; ++i) mbmi->interp_filter[0] = cm->interp_filter;
@@ -9750,7 +9744,7 @@
}
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- rate2 += vp10_cost_bit(comp_mode_p, comp_pred);
+ rate2 += av1_cost_bit(comp_mode_p, comp_pred);
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
@@ -9775,24 +9769,23 @@
(cm->interp_filter == mbmi->interp_filter));
#endif
- vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
- cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
+ av1_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+ cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
- vp10_zero(best_pred_diff);
+ av1_zero(best_pred_diff);
store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, 0);
}
-void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
- TileDataEnc *tile_data,
- struct macroblock *x, int mi_row,
- int mi_col, struct RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi, TileDataEnc *tile_data,
+ struct macroblock *x, int mi_row, int mi_col,
+ struct RD_COST *rd_cost,
#if CONFIG_SUPERTX
- int *returnrate_nocoef,
+ int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far) {
- VP10_COMMON *const cm = &cpi->common;
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far) {
+ AV1_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -9805,16 +9798,16 @@
struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE];
static const int flag_list[TOTAL_REFS_PER_FRAME] = {
0,
- VPX_LAST_FLAG,
+ AOM_LAST_FLAG,
#if CONFIG_EXT_REFS
- VPX_LAST2_FLAG,
- VPX_LAST3_FLAG,
+ AOM_LAST2_FLAG,
+ AOM_LAST3_FLAG,
#endif // CONFIG_EXT_REFS
- VPX_GOLD_FLAG,
+ AOM_GOLD_FLAG,
#if CONFIG_EXT_REFS
- VPX_BWD_FLAG,
+ AOM_BWD_FLAG,
#endif // CONFIG_EXT_REFS
- VPX_ALT_FLAG
+ AOM_ALT_FLAG
};
int64_t best_rd = best_rd_so_far;
int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
@@ -9824,7 +9817,7 @@
int ref_index, best_ref_index = 0;
unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
- vpx_prob comp_mode_p;
+ aom_prob comp_mode_p;
#if CONFIG_DUAL_FILTER
INTERP_FILTER tmp_best_filter[4] = { 0 };
#else
@@ -9834,7 +9827,7 @@
int64_t dist_uv;
int skip_uv;
PREDICTION_MODE mode_uv = DC_PRED;
- const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+ const int intra_cost_penalty = av1_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
#if CONFIG_EXT_INTER
int_mv seg_mvs[4][2][TOTAL_REFS_PER_FRAME];
@@ -9845,14 +9838,14 @@
int best_skip2 = 0;
int ref_frame_skip_mask[2] = { 0 };
int internal_active_edge =
- vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+ av1_active_edge_sb(cpi, mi_row, mi_col) && av1_internal_image_edge(cpi);
#if CONFIG_SUPERTX
best_rd_so_far = INT64_MAX;
best_rd = best_rd_so_far;
best_yrd = best_rd_so_far;
#endif // CONFIG_SUPERTX
- vp10_zero(best_mbmode);
+ av1_zero(best_mbmode);
#if CONFIG_EXT_INTRA
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
@@ -9924,8 +9917,8 @@
int64_t total_sse = INT_MAX;
int early_term = 0;
- ref_frame = vp10_ref_order[ref_index].ref_frame[0];
- second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];
+ ref_frame = av1_ref_order[ref_index].ref_frame[0];
+ second_ref_frame = av1_ref_order[ref_index].ref_frame[1];
// Look at the reference frame of the best mode so far and set the
// skip mask to look at a subset of the remaining modes.
@@ -9993,7 +9986,7 @@
}
if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
- (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
+ (ref_frame_skip_mask[1] & (1 << AOMMAX(0, second_ref_frame))))
continue;
// Test best rd so far against threshold for trying this mode.
@@ -10019,11 +10012,11 @@
// TODO(jingning, jkoleszar): scaling reference frame not supported for
// sub8x8 blocks.
if (ref_frame > INTRA_FRAME &&
- vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+ av1_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
continue;
if (second_ref_frame > INTRA_FRAME &&
- vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
+ av1_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
continue;
if (comp_pred)
@@ -10216,18 +10209,18 @@
bsi, switchable_filter_index, mi_row, mi_col);
#if CONFIG_EXT_INTERP
#if CONFIG_DUAL_FILTER
- if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+ if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
(mbmi->interp_filter[0] != EIGHTTAP_REGULAR ||
mbmi->interp_filter[1] != EIGHTTAP_REGULAR)) // invalid config
continue;
#else
- if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+ if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
mbmi->interp_filter != EIGHTTAP_REGULAR) // invalid config
continue;
#endif
#endif // CONFIG_EXT_INTERP
if (tmp_rd == INT64_MAX) continue;
- rs = vp10_get_switchable_rate(cpi, xd);
+ rs = av1_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
@@ -10301,14 +10294,14 @@
bsi, 0, mi_row, mi_col);
#if CONFIG_EXT_INTERP
#if CONFIG_DUAL_FILTER
- if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+ if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
(mbmi->interp_filter[0] != EIGHTTAP_REGULAR ||
mbmi->interp_filter[1] != EIGHTTAP_REGULAR)) {
mbmi->interp_filter[0] = EIGHTTAP_REGULAR;
mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
}
#else
- if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+ if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
mbmi->interp_filter != EIGHTTAP_REGULAR)
mbmi->interp_filter = EIGHTTAP_REGULAR;
#endif // CONFIG_DUAL_FILTER
@@ -10345,22 +10338,22 @@
distortion2 += distortion;
if (cm->interp_filter == SWITCHABLE)
- rate2 += vp10_get_switchable_rate(cpi, xd);
+ rate2 += av1_get_switchable_rate(cpi, xd);
if (!mode_excluded)
mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
: cm->reference_mode == COMPOUND_REFERENCE;
- compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+ compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
tmp_best_rdu =
- best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
+ best_rd - AOMMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
RDCOST(x->rdmult, x->rddiv, 0, total_sse));
if (tmp_best_rdu > 0) {
// If even the 'Y' rd value of split is higher than best so far
// then don't bother looking at UV
- vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
+ av1_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
#if CONFIG_VAR_TX
if (!inter_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
&uv_sse, BLOCK_8X8, tmp_best_rdu))
@@ -10400,10 +10393,10 @@
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
} else {
// FIXME(rbultje) make this work for splitmv also
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
distortion2 = total_sse;
assert(total_sse >= 0);
rate2 -= (rate_y + rate_uv);
@@ -10413,7 +10406,7 @@
}
} else {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
// Calculate the final RD estimate for this mode.
@@ -10422,7 +10415,7 @@
if (!disable_skip && ref_frame == INTRA_FRAME) {
for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+ best_pred_rd[i] = AOMMIN(best_pred_rd[i], this_rd);
}
// Did this mode help, i.e., is it the new best mode?
@@ -10441,9 +10434,9 @@
*returnrate_nocoef = rate2 - rate_y - rate_uv;
if (!disable_skip)
*returnrate_nocoef -=
- vp10_cost_bit(vp10_get_skip_prob(cm, xd), this_skip2);
- *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
- mbmi->ref_frame[0] != INTRA_FRAME);
+ av1_cost_bit(av1_get_skip_prob(cm, xd), this_skip2);
+ *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd),
+ mbmi->ref_frame[0] != INTRA_FRAME);
assert(*returnrate_nocoef > 0);
#endif // CONFIG_SUPERTX
rd_cost->dist = distortion2;
@@ -10468,11 +10461,11 @@
int qstep = xd->plane[0].dequant[1];
// TODO(debargha): Enhance this by specializing for each mode_index
int scale = 4;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
qstep >>= (xd->bd - 8);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (x->source_variance < UINT_MAX) {
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
@@ -10552,8 +10545,8 @@
!is_inter_block(&best_mbmode));
#endif
- vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
- sf->adaptive_rd_thresh, bsize, best_ref_index);
+ av1_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+ sf->adaptive_rd_thresh, bsize, best_ref_index);
// macroblock modes
*mbmi = best_mbmode;
@@ -10587,34 +10580,34 @@
}
#if CONFIG_OBMC
-// This function has a structure similar to vp10_build_obmc_inter_prediction
+// This function has a structure similar to av1_build_obmc_inter_prediction
//
// The OBMC predictor is computed as:
//
// PObmc(x,y) =
-// VPX_BLEND_A64(Mh(x),
-// VPX_BLEND_A64(Mv(y), P(x,y), PAbove(x,y)),
+// AOM_BLEND_A64(Mh(x),
+// AOM_BLEND_A64(Mv(y), P(x,y), PAbove(x,y)),
// PLeft(x, y))
//
-// Scaling up by VPX_BLEND_A64_MAX_ALPHA ** 2 and omitting the intermediate
+// Scaling up by AOM_BLEND_A64_MAX_ALPHA ** 2 and omitting the intermediate
// rounding, this can be written as:
//
-// VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA * Pobmc(x,y) =
+// AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA * Pobmc(x,y) =
// Mh(x) * Mv(y) * P(x,y) +
// Mh(x) * Cv(y) * Pabove(x,y) +
-// VPX_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
+// AOM_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
//
// Where:
//
-// Cv(y) = VPX_BLEND_A64_MAX_ALPHA - Mv(y)
-// Ch(y) = VPX_BLEND_A64_MAX_ALPHA - Mh(y)
+// Cv(y) = AOM_BLEND_A64_MAX_ALPHA - Mv(y)
+// Ch(x) = AOM_BLEND_A64_MAX_ALPHA - Mh(x)
//
// This function computes 'wsrc' and 'mask' as:
//
// wsrc(x, y) =
-// VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA * src(x, y) -
+// AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA * src(x, y) -
// Mh(x) * Cv(y) * Pabove(x,y) -
-// VPX_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
+// AOM_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
//
// mask(x, y) = Mh(x) * Mv(y)
//
@@ -10623,10 +10616,9 @@
// computing:
//
// error(x, y) =
-// wsrc(x, y) - mask(x, y) * P(x, y) / (VPX_BLEND_A64_MAX_ALPHA ** 2)
+// wsrc(x, y) - mask(x, y) * P(x, y) / (AOM_BLEND_A64_MAX_ALPHA ** 2)
//
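// A worked check of the identities above with illustrative numbers
// (AOM_BLEND_A64_MAX_ALPHA == 64): let src = 100, P = 80, PAbove = 90,
// PLeft = 70, Mv(y) = 48, Mh(x) = 32, so Cv(y) = 16 and Ch(x) = 32.
//
//   mask = Mh * Mv = 32 * 48 = 1536
//   wsrc = 64 * 64 * 100 - 32 * 16 * 90 - 64 * 32 * 70
//        = 409600 - 46080 - 143360 = 220160
//   scaled error = wsrc - mask * P = 220160 - 1536 * 80 = 97280
//   error = 97280 / (64 * 64) = 23.75
//
// Evaluating the blends directly agrees: the inner blend is
// (48 * 80 + 16 * 90) / 64 = 82.5, the outer is
// (32 * 82.5 + 32 * 70) / 64 = 76.25, and src - PObmc = 100 - 76.25 = 23.75.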
-static void calc_target_weighted_pred(const VP10_COMMON *cm,
- const MACROBLOCK *x,
+static void calc_target_weighted_pred(const AV1_COMMON *cm, const MACROBLOCK *x,
const MACROBLOCKD *xd, int mi_row,
int mi_col, const uint8_t *above,
int above_stride, const uint8_t *left,
@@ -10638,26 +10630,26 @@
const int bh = 8 * xd->n8_h;
const int wsrc_stride = bw;
const int mask_stride = bw;
- const int src_scale = VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA;
-#if CONFIG_VP9_HIGHBITDEPTH
+ const int src_scale = AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA;
+#if CONFIG_AOM_HIGHBITDEPTH
const int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
#else
const int is_hbd = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// plane 0 should not be subsampled
assert(xd->plane[0].subsampling_x == 0);
assert(xd->plane[0].subsampling_y == 0);
- vp10_zero_array(wsrc_buf, bw * bh);
- for (i = 0; i < bw * bh; ++i) mask_buf[i] = VPX_BLEND_A64_MAX_ALPHA;
+ av1_zero_array(wsrc_buf, bw * bh);
+ for (i = 0; i < bw * bh; ++i) mask_buf[i] = AOM_BLEND_A64_MAX_ALPHA;
// handle above row
if (xd->up_available) {
const int overlap = num_4x4_blocks_high_lookup[bsize] * 2;
- const int miw = VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
+ const int miw = AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
const int mi_row_offset = -1;
- const uint8_t *const mask1d = vp10_get_obmc_mask(overlap);
+ const uint8_t *const mask1d = av1_get_obmc_mask(overlap);
assert(miw > 0);
@@ -10667,7 +10659,7 @@
const MB_MODE_INFO *const above_mbmi =
&xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
const int mi_step =
- VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+ AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
const int neighbor_bw = mi_step * MI_SIZE;
if (is_neighbor_overlappable(above_mbmi)) {
@@ -10680,7 +10672,7 @@
for (row = 0; row < overlap; ++row) {
const uint8_t m0 = mask1d[row];
- const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+ const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
for (col = 0; col < neighbor_bw; ++col) {
wsrc[col] = m1 * tmp[col];
mask[col] = m0;
@@ -10689,13 +10681,13 @@
mask += mask_stride;
tmp += tmp_stride;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
const uint16_t *tmp = CONVERT_TO_SHORTPTR(above);
for (row = 0; row < overlap; ++row) {
const uint8_t m0 = mask1d[row];
- const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+ const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
for (col = 0; col < neighbor_bw; ++col) {
wsrc[col] = m1 * tmp[col];
mask[col] = m0;
@@ -10704,7 +10696,7 @@
mask += mask_stride;
tmp += tmp_stride;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -10714,16 +10706,16 @@
}
for (i = 0; i < bw * bh; ++i) {
- wsrc_buf[i] *= VPX_BLEND_A64_MAX_ALPHA;
- mask_buf[i] *= VPX_BLEND_A64_MAX_ALPHA;
+ wsrc_buf[i] *= AOM_BLEND_A64_MAX_ALPHA;
+ mask_buf[i] *= AOM_BLEND_A64_MAX_ALPHA;
}
// handle left column
if (xd->left_available) {
const int overlap = num_4x4_blocks_wide_lookup[bsize] * 2;
- const int mih = VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
+ const int mih = AOMMIN(xd->n8_h, cm->mi_rows - mi_row);
const int mi_col_offset = -1;
- const uint8_t *const mask1d = vp10_get_obmc_mask(overlap);
+ const uint8_t *const mask1d = av1_get_obmc_mask(overlap);
assert(mih > 0);
@@ -10733,7 +10725,7 @@
const MB_MODE_INFO *const left_mbmi =
&xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
const int mi_step =
- VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+ AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
const int neighbor_bh = mi_step * MI_SIZE;
if (is_neighbor_overlappable(left_mbmi)) {
@@ -10747,32 +10739,32 @@
for (row = 0; row < neighbor_bh; ++row) {
for (col = 0; col < overlap; ++col) {
const uint8_t m0 = mask1d[col];
- const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
- wsrc[col] = (wsrc[col] >> VPX_BLEND_A64_ROUND_BITS) * m0 +
- (tmp[col] << VPX_BLEND_A64_ROUND_BITS) * m1;
- mask[col] = (mask[col] >> VPX_BLEND_A64_ROUND_BITS) * m0;
+ const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
+ wsrc[col] = (wsrc[col] >> AOM_BLEND_A64_ROUND_BITS) * m0 +
+ (tmp[col] << AOM_BLEND_A64_ROUND_BITS) * m1;
+ mask[col] = (mask[col] >> AOM_BLEND_A64_ROUND_BITS) * m0;
}
wsrc += wsrc_stride;
mask += mask_stride;
tmp += tmp_stride;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
const uint16_t *tmp = CONVERT_TO_SHORTPTR(left);
for (row = 0; row < neighbor_bh; ++row) {
for (col = 0; col < overlap; ++col) {
const uint8_t m0 = mask1d[col];
- const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
- wsrc[col] = (wsrc[col] >> VPX_BLEND_A64_ROUND_BITS) * m0 +
- (tmp[col] << VPX_BLEND_A64_ROUND_BITS) * m1;
- mask[col] = (mask[col] >> VPX_BLEND_A64_ROUND_BITS) * m0;
+ const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
+ wsrc[col] = (wsrc[col] >> AOM_BLEND_A64_ROUND_BITS) * m0 +
+ (tmp[col] << AOM_BLEND_A64_ROUND_BITS) * m1;
+ mask[col] = (mask[col] >> AOM_BLEND_A64_ROUND_BITS) * m0;
}
wsrc += wsrc_stride;
mask += mask_stride;
tmp += tmp_stride;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -10791,7 +10783,7 @@
wsrc_buf += wsrc_stride;
src += x->plane[0].src.stride;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
const uint16_t *src = CONVERT_TO_SHORTPTR(x->plane[0].src.buf);
@@ -10802,7 +10794,7 @@
wsrc_buf += wsrc_stride;
src += x->plane[0].src.stride;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
#endif // CONFIG_OBMC
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
index 4ce2879..de02f1f 100644
--- a/av1/encoder/rdopt.h
+++ b/av1/encoder/rdopt.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_RDOPT_H_
-#define VP10_ENCODER_RDOPT_H_
+#ifndef AV1_ENCODER_RDOPT_H_
+#define AV1_ENCODER_RDOPT_H_
#include "av1/common/blockd.h"
@@ -21,71 +21,70 @@
#endif
struct TileInfo;
-struct VP10_COMP;
+struct AV1_COMP;
struct macroblock;
struct RD_COST;
-void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
- struct RD_COST *rd_cost, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+void av1_rd_pick_intra_mode_sb(struct AV1_COMP *cpi, struct macroblock *x,
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd);
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs);
-#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs, int bd);
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs);
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs, int bd);
#endif
-void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
- struct TileDataEnc *tile_data,
- struct macroblock *x, int mi_row, int mi_col,
- struct RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
+ struct TileDataEnc *tile_data,
+ struct macroblock *x, int mi_row, int mi_col,
+ struct RD_COST *rd_cost,
#if CONFIG_SUPERTX
- int *returnrate_nocoef,
+ int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far);
-void vp10_rd_pick_inter_mode_sb_seg_skip(
- struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+void av1_rd_pick_inter_mode_sb_seg_skip(
+ struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far);
-int vp10_internal_image_edge(struct VP10_COMP *cpi);
-int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
-int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step);
-int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
+int av1_internal_image_edge(struct AV1_COMP *cpi);
+int av1_active_h_edge(struct AV1_COMP *cpi, int mi_row, int mi_step);
+int av1_active_v_edge(struct AV1_COMP *cpi, int mi_col, int mi_step);
+int av1_active_edge_sb(struct AV1_COMP *cpi, int mi_row, int mi_col);
-void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
- struct TileDataEnc *tile_data,
- struct macroblock *x, int mi_row,
- int mi_col, struct RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi,
+ struct TileDataEnc *tile_data,
+ struct macroblock *x, int mi_row, int mi_col,
+ struct RD_COST *rd_cost,
#if CONFIG_SUPERTX
- int *returnrate_nocoef,
+ int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far);
#if CONFIG_SUPERTX
#if CONFIG_VAR_TX
-void vp10_tx_block_rd_b(const VP10_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
- int blk_row, int blk_col, int plane, int block,
- int plane_bsize, int coeff_ctx, int *rate,
- int64_t *dist, int64_t *bsse, int *skip);
+void av1_tx_block_rd_b(const AV1_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
+ int blk_row, int blk_col, int plane, int block,
+ int plane_bsize, int coeff_ctx, int *rate, int64_t *dist,
+ int64_t *bsse, int *skip);
#endif
-void vp10_txfm_rd_in_plane_supertx(MACROBLOCK *x, const VP10_COMP *cpi,
- int *rate, int64_t *distortion,
- int *skippable, int64_t *sse,
- int64_t ref_best_rd, int plane,
- BLOCK_SIZE bsize, TX_SIZE tx_size,
- int use_fast_coef_casting);
+void av1_txfm_rd_in_plane_supertx(MACROBLOCK *x, const AV1_COMP *cpi, int *rate,
+ int64_t *distortion, int *skippable,
+ int64_t *sse, int64_t ref_best_rd, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size,
+ int use_fast_coef_casting);
#endif // CONFIG_SUPERTX
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RDOPT_H_
+#endif // AV1_ENCODER_RDOPT_H_
diff --git a/av1/encoder/resize.c b/av1/encoder/resize.c
index e209b21..91fa6ed 100644
--- a/av1/encoder/resize.c
+++ b/av1/encoder/resize.c
@@ -15,9 +15,9 @@
#include <stdlib.h>
#include <string.h>
-#if CONFIG_VP9_HIGHBITDEPTH
-#include "aom_dsp/vpx_dsp_common.h"
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+#include "aom_dsp/aom_dsp_common.h"
+#endif // CONFIG_AOM_HIGHBITDEPTH
#include "aom_ports/mem.h"
#include "av1/common/common.h"
#include "av1/encoder/resize.h"
@@ -132,8 +132,8 @@
};
// Filters for factor of 2 downsampling.
-static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
-static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
+static const int16_t av1_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t av1_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
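// The tables above hold one half of each symmetric kernel; the taps are
// applied to mirrored input pairs, and the even half sums to 64 (128 across
// the full kernel), matching a 7-bit rounding shift so flat input passes
// through unchanged. A minimal steady-state sketch of one down2_symeven
// output sample, assuming FILTER_BITS == 7 and ignoring the edge
// replication that the in-tree down2_symeven further below handles:
static uint8_t down2_symeven_sample_sketch(const uint8_t *input, int i) {
  static const int16_t half_filter[] = { 56, 12, -3, -1 };
  int j, sum = 1 << 6;  // rounding offset for the >> 7 below
  for (j = 0; j < 4; ++j)
    sum += (input[2 * i - j] + input[2 * i + 1 + j]) * half_filter[j];
  sum >>= 7;
  return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));  // clip to pixel
}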
static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
int outlength16 = outlength * 16;
@@ -239,8 +239,8 @@
static void down2_symeven(const uint8_t *const input, int length,
uint8_t *output) {
// Actual filter len = 2 * filter_len_half.
- const int16_t *filter = vp10_down2_symeven_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+ const int16_t *filter = av1_down2_symeven_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
int i, j;
uint8_t *optr = output;
int l1 = filter_len_half;
@@ -295,8 +295,8 @@
static void down2_symodd(const uint8_t *const input, int length,
uint8_t *output) {
// Actual filter len = 2 * filter_len_half - 1.
- const int16_t *filter = vp10_down2_symodd_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+ const int16_t *filter = av1_down2_symodd_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
int i, j;
uint8_t *optr = output;
int l1 = filter_len_half - 1;
@@ -419,9 +419,9 @@
}
}
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
- int in_stride, uint8_t *output, int height2, int width2,
- int out_stride) {
+void av1_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2, int width2,
+ int out_stride) {
int i;
uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
uint8_t *tmpbuf =
@@ -450,7 +450,7 @@
free(arrbuf2);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_interpolate(const uint16_t *const input, int inlength,
uint16_t *output, int outlength, int bd) {
const int64_t delta =
@@ -541,8 +541,8 @@
static void highbd_down2_symeven(const uint16_t *const input, int length,
uint16_t *output, int bd) {
// Actual filter len = 2 * filter_len_half.
- static const int16_t *filter = vp10_down2_symeven_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+ static const int16_t *filter = av1_down2_symeven_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
int i, j;
uint16_t *optr = output;
int l1 = filter_len_half;
@@ -597,8 +597,8 @@
static void highbd_down2_symodd(const uint16_t *const input, int length,
uint16_t *output, int bd) {
// Actual filter len = 2 * filter_len_half - 1.
- static const int16_t *filter = vp10_down2_symodd_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+ static const int16_t *filter = av1_down2_symodd_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
int i, j;
uint16_t *optr = output;
int l1 = filter_len_half - 1;
@@ -708,9 +708,9 @@
}
}
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
- int in_stride, uint8_t *output, int height2,
- int width2, int out_stride, int bd) {
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2,
+ int width2, int out_stride, int bd) {
int i;
uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height);
uint16_t *tmpbuf =
@@ -736,84 +736,84 @@
free(arrbuf);
free(arrbuf2);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
- int ouv_stride, int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
- owidth / 2, ouv_stride);
- vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
- owidth / 2, ouv_stride);
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ av1_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ owidth / 2, ouv_stride);
+ av1_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ owidth / 2, ouv_stride);
}
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
- int ouv_stride, int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
- ouv_stride);
- vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
- ouv_stride);
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ av1_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+ ouv_stride);
+ av1_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+ ouv_stride);
}
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
- int ouv_stride, int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
- ouv_stride);
- vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
- ouv_stride);
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ av1_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ ouv_stride);
+ av1_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ ouv_stride);
}
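// The three wrappers above differ only in chroma scaling: 4:2:0 halves both
// chroma dimensions, 4:2:2 halves only the width, and 4:4:4 keeps chroma at
// full size. A sketch of that dimension rule with the subsampling factors
// written out explicitly (names here are illustrative):
static void chroma_plane_dims(int width, int height, int ss_x, int ss_y,
                              int *cw, int *ch) {
  // ss_x/ss_y are 1 on a subsampled axis: 4:2:0 -> (1, 1), 4:2:2 -> (1, 0),
  // 4:4:4 -> (0, 0).
  *cw = width >> ss_x;
  *ch = height >> ss_y;
}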
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width,
- uint8_t *oy, int oy_stride, uint8_t *ou,
- uint8_t *ov, int ouv_stride, int oheight,
- int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
- oy_stride, bd);
- vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
- owidth / 2, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
- owidth / 2, ouv_stride, bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ av1_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ owidth / 2, ouv_stride, bd);
+ av1_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ owidth / 2, ouv_stride, bd);
}
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width,
- uint8_t *oy, int oy_stride, uint8_t *ou,
- uint8_t *ov, int ouv_stride, int oheight,
- int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
- oy_stride, bd);
- vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
- owidth / 2, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
- owidth / 2, ouv_stride, bd);
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ av1_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+ owidth / 2, ouv_stride, bd);
+ av1_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+ owidth / 2, ouv_stride, bd);
}
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width,
- uint8_t *oy, int oy_stride, uint8_t *ou,
- uint8_t *ov, int ouv_stride, int oheight,
- int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
- oy_stride, bd);
- vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
- ouv_stride, bd);
- vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
- ouv_stride, bd);
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ av1_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ ouv_stride, bd);
+ av1_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ ouv_stride, bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/resize.h b/av1/encoder/resize.h
index 8fe1d1b..94f9ea3 100644
--- a/av1/encoder/resize.h
+++ b/av1/encoder/resize.h
@@ -8,61 +8,61 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_RESIZE_H_
-#define VP10_ENCODER_RESIZE_H_
+#ifndef AV1_ENCODER_RESIZE_H_
+#define AV1_ENCODER_RESIZE_H_
#include <stdio.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
- int in_stride, uint8_t *output, int height2, int width2,
- int out_stride);
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
- int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
- int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width, uint8_t *oy,
- int oy_stride, uint8_t *ou, uint8_t *ov,
- int ouv_stride, int oheight, int owidth);
+void av1_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2, int width2,
+ int out_stride);
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
- int in_stride, uint8_t *output, int height2,
- int width2, int out_stride, int bd);
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width,
- uint8_t *oy, int oy_stride, uint8_t *ou,
- uint8_t *ov, int ouv_stride, int oheight,
- int owidth, int bd);
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width,
- uint8_t *oy, int oy_stride, uint8_t *ou,
- uint8_t *ov, int ouv_stride, int oheight,
- int owidth, int bd);
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride, int height, int width,
- uint8_t *oy, int oy_stride, uint8_t *ou,
- uint8_t *ov, int ouv_stride, int oheight,
- int owidth, int bd);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2,
+ int width2, int out_stride, int bd);
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RESIZE_H_
+#endif // AV1_ENCODER_RESIZE_H_
diff --git a/av1/encoder/segmentation.c b/av1/encoder/segmentation.c
index 5ac1283..9585878 100644
--- a/av1/encoder/segmentation.c
+++ b/av1/encoder/segmentation.c
@@ -10,7 +10,7 @@
#include <limits.h>
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/pred_common.h"
#include "av1/common/tile_common.h"
@@ -19,38 +19,38 @@
#include "av1/encoder/segmentation.h"
#include "av1/encoder/subexp.h"
-void vp10_enable_segmentation(struct segmentation *seg) {
+void av1_enable_segmentation(struct segmentation *seg) {
seg->enabled = 1;
seg->update_map = 1;
seg->update_data = 1;
}
-void vp10_disable_segmentation(struct segmentation *seg) {
+void av1_disable_segmentation(struct segmentation *seg) {
seg->enabled = 0;
seg->update_map = 0;
seg->update_data = 0;
}
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
- unsigned char abs_delta) {
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
+ unsigned char abs_delta) {
seg->abs_delta = abs_delta;
memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
}
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id) {
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
seg->feature_mask[segment_id] &= ~(1 << feature_id);
}
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id) {
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
seg->feature_data[segment_id][feature_id] = 0;
}
// Based on a set of segment counts, calculate a probability tree
static void calc_segtree_probs(unsigned *segcounts,
- vpx_prob *segment_tree_probs,
- const vpx_prob *cur_tree_probs) {
+ aom_prob *segment_tree_probs,
+ const aom_prob *cur_tree_probs) {
// Work out probabilities of each segment
const unsigned cc[4] = { segcounts[0] + segcounts[1],
segcounts[2] + segcounts[3],
@@ -70,13 +70,13 @@
for (i = 0; i < 7; i++) {
const unsigned *ct =
i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
- vp10_prob_diff_update_savings_search(
+ av1_prob_diff_update_savings_search(
ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
}
}
// Based on a set of segment counts and probabilities, calculate a cost estimate
-static int cost_segmap(unsigned *segcounts, vpx_prob *probs) {
+static int cost_segmap(unsigned *segcounts, aom_prob *probs) {
const int c01 = segcounts[0] + segcounts[1];
const int c23 = segcounts[2] + segcounts[3];
const int c45 = segcounts[4] + segcounts[5];
@@ -85,35 +85,35 @@
const int c4567 = c45 + c67;
// Cost the top node of the tree
- int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
+ int cost = c0123 * av1_cost_zero(probs[0]) + c4567 * av1_cost_one(probs[0]);
// Cost subsequent levels
if (c0123 > 0) {
- cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
+ cost += c01 * av1_cost_zero(probs[1]) + c23 * av1_cost_one(probs[1]);
if (c01 > 0)
- cost += segcounts[0] * vp10_cost_zero(probs[3]) +
- segcounts[1] * vp10_cost_one(probs[3]);
+ cost += segcounts[0] * av1_cost_zero(probs[3]) +
+ segcounts[1] * av1_cost_one(probs[3]);
if (c23 > 0)
- cost += segcounts[2] * vp10_cost_zero(probs[4]) +
- segcounts[3] * vp10_cost_one(probs[4]);
+ cost += segcounts[2] * av1_cost_zero(probs[4]) +
+ segcounts[3] * av1_cost_one(probs[4]);
}
if (c4567 > 0) {
- cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
+ cost += c45 * av1_cost_zero(probs[2]) + c67 * av1_cost_one(probs[2]);
if (c45 > 0)
- cost += segcounts[4] * vp10_cost_zero(probs[5]) +
- segcounts[5] * vp10_cost_one(probs[5]);
+ cost += segcounts[4] * av1_cost_zero(probs[5]) +
+ segcounts[5] * av1_cost_one(probs[5]);
if (c67 > 0)
- cost += segcounts[6] * vp10_cost_zero(probs[6]) +
- segcounts[7] * vp10_cost_one(probs[6]);
+ cost += segcounts[6] * av1_cost_zero(probs[6]) +
+ segcounts[7] * av1_cost_one(probs[6]);
}
return cost;
}
-static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs(const AV1_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *tile, MODE_INFO **mi,
unsigned *no_pred_segcounts,
unsigned (*temporal_predictor_count)[2],
@@ -138,7 +138,7 @@
const int pred_segment_id =
get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
const int pred_flag = pred_segment_id == segment_id;
- const int pred_context = vp10_get_pred_context_seg_id(xd);
+ const int pred_context = av1_get_pred_context_seg_id(xd);
// Store the prediction status for this mb and update counts
// as appropriate
@@ -150,7 +150,7 @@
}
}
-static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *tile, MODE_INFO **mi,
unsigned *no_pred_segcounts,
unsigned (*temporal_predictor_count)[2],
@@ -285,7 +285,7 @@
#endif // CONFIG_EXT_PARTITION_TYPES
}
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd) {
struct segmentation *seg = &cm->seg;
struct segmentation_probs *segp = &cm->fc->seg;
@@ -298,23 +298,23 @@
unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
- vpx_prob no_pred_tree[SEG_TREE_PROBS];
- vpx_prob t_pred_tree[SEG_TREE_PROBS];
- vpx_prob t_nopred_prob[PREDICTION_PROBS];
+ aom_prob no_pred_tree[SEG_TREE_PROBS];
+ aom_prob t_pred_tree[SEG_TREE_PROBS];
+ aom_prob t_nopred_prob[PREDICTION_PROBS];
(void)xd;
// We are about to recompute all the segment counts, so zero the accumulators.
- vp10_zero(cm->counts.seg);
+ av1_zero(cm->counts.seg);
// First of all generate stats regarding how well the last segment map
// predicts this one
for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
TileInfo tile_info;
- vp10_tile_set_row(&tile_info, cm, tile_row);
+ av1_tile_set_row(&tile_info, cm, tile_row);
for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
MODE_INFO **mi_ptr;
- vp10_tile_set_col(&tile_info, cm, tile_col);
+ av1_tile_set_col(&tile_info, cm, tile_col);
mi_ptr = cm->mi_grid_visible + tile_info.mi_row_start * cm->mi_stride +
tile_info.mi_col_start;
for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
@@ -348,13 +348,13 @@
const int count1 = temporal_predictor_count[i][1];
t_nopred_prob[i] = get_binary_prob(count0, count1);
- vp10_prob_diff_update_savings_search(temporal_predictor_count[i],
- segp->pred_probs[i],
- &t_nopred_prob[i], DIFF_UPDATE_PROB);
+ av1_prob_diff_update_savings_search(temporal_predictor_count[i],
+ segp->pred_probs[i],
+ &t_nopred_prob[i], DIFF_UPDATE_PROB);
// Add in the predictor signaling cost
- t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) +
- count1 * vp10_cost_one(t_nopred_prob[i]);
+ t_pred_cost += count0 * av1_cost_zero(t_nopred_prob[i]) +
+ count1 * av1_cost_one(t_nopred_prob[i]);
}
}
@@ -367,12 +367,12 @@
}
}
-void vp10_reset_segment_features(VP10_COMMON *cm) {
+void av1_reset_segment_features(AV1_COMMON *cm) {
struct segmentation *seg = &cm->seg;
// Set up default state for MB feature flags
seg->enabled = 0;
seg->update_map = 0;
seg->update_data = 0;
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
}
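
The segment-tree arithmetic renamed above is compact enough to sketch standalone. In the sketch below, get_prob() is a simplified stand-in for the library's get_binary_prob() (its exact rounding and clamping may differ); the node layout mirrors calc_segtree_probs() and cost_segmap(): probs[0] splits segments {0..3} from {4..7}, probs[1] and probs[2] split those halves, and probs[3..6] split the leaf pairs.

#include <stdio.h>

static unsigned char get_prob(unsigned num, unsigned den) {
  unsigned p;
  if (den == 0) return 128;          /* no observations: assume 50/50 */
  p = (256 * num + den / 2) / den;   /* P(left branch) on an 8-bit scale */
  return (unsigned char)(p < 1 ? 1 : (p > 255 ? 255 : p));
}

int main(void) {
  const unsigned segcounts[8] = { 50, 10, 5, 5, 20, 5, 3, 2 };
  unsigned cc[4], ccc[2], i;
  unsigned char probs[7];
  for (i = 0; i < 4; i++) cc[i] = segcounts[2 * i] + segcounts[2 * i + 1];
  ccc[0] = cc[0] + cc[1];
  ccc[1] = cc[2] + cc[3];
  probs[0] = get_prob(ccc[0], ccc[0] + ccc[1]); /* {0..3} vs {4..7} */
  probs[1] = get_prob(cc[0], ccc[0]);           /* {0,1} vs {2,3} */
  probs[2] = get_prob(cc[2], ccc[1]);           /* {4,5} vs {6,7} */
  for (i = 0; i < 4; i++)
    probs[3 + i] = get_prob(segcounts[2 * i], cc[i]); /* leaf pairs */
  for (i = 0; i < 7; i++) printf("probs[%u] = %u\n", i, probs[i]);
  return 0;
}
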
diff --git a/av1/encoder/segmentation.h b/av1/encoder/segmentation.h
index 3c79bd1..e15c8b1 100644
--- a/av1/encoder/segmentation.h
+++ b/av1/encoder/segmentation.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_SEGMENTATION_H_
-#define VP10_ENCODER_SEGMENTATION_H_
+#ifndef AV1_ENCODER_SEGMENTATION_H_
+#define AV1_ENCODER_SEGMENTATION_H_
#include "av1/common/blockd.h"
#include "av1/encoder/encoder.h"
@@ -18,13 +18,13 @@
extern "C" {
#endif
-void vp10_enable_segmentation(struct segmentation *seg);
-void vp10_disable_segmentation(struct segmentation *seg);
+void av1_enable_segmentation(struct segmentation *seg);
+void av1_disable_segmentation(struct segmentation *seg);
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id);
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id);
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id);
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id);
// The values given for each segment can be either deltas (from the default
// value chosen for the frame) or absolute values.
@@ -36,15 +36,15 @@
//
// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
// the absolute values given).
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
- unsigned char abs_delta);
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
+ unsigned char abs_delta);
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd);
-void vp10_reset_segment_features(VP10_COMMON *cm);
+void av1_reset_segment_features(AV1_COMMON *cm);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SEGMENTATION_H_
+#endif // AV1_ENCODER_SEGMENTATION_H_
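
The abs_delta comment above reduces to a one-liner. A standalone sketch of the two interpretations (illustration only; in the tree the mode constants are SEGMENT_DELTADATA and SEGMENT_ABSDATA):

/* How a per-segment feature value is applied under the two modes:
 * absolute -> use the segment's value as-is;
 * delta    -> offset from the frame-level default. */
static int effective_value(int frame_default, int seg_value, int is_absolute) {
  return is_absolute ? seg_value : frame_default + seg_value;
}
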
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index ab66250..d0b198b 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -14,7 +14,7 @@
#include "av1/encoder/speed_features.h"
#include "av1/encoder/rdopt.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
// Mesh search patterns for various speed settings
static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = {
@@ -37,7 +37,7 @@
// Intra only frames, golden frames (except alt ref overlays) and
// alt ref frames tend to be coded at a higher than ambient quality
-static int frame_is_boosted(const VP10_COMP *cpi) {
+static int frame_is_boosted(const AV1_COMP *cpi) {
return frame_is_kf_gf_arf(cpi);
}
@@ -47,7 +47,7 @@
// partly on the screen area over which they propagate. Propagation is
// limited by transform block size, but the screen area taken up by a given
// block size will be larger for a small image format stretched to full screen.
-static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) {
+static BLOCK_SIZE set_partition_min_limit(AV1_COMMON *const cm) {
unsigned int screen_area = (cm->width * cm->height);
// Select block size based on image format size.
@@ -63,13 +63,13 @@
}
}
-static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_good_speed_feature_framesize_dependent(AV1_COMP *cpi,
SPEED_FEATURES *sf,
int speed) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
if (speed >= 1) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->disable_split_mask =
cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
sf->partition_search_breakout_dist_thr = (1 << 23);
@@ -80,7 +80,7 @@
}
if (speed >= 2) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->disable_split_mask =
cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
sf->adaptive_pred_interp_filter = 0;
@@ -95,7 +95,7 @@
}
if (speed >= 3) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->disable_split_mask = DISABLE_ALL_SPLIT;
sf->schedule_mode_search = cm->base_qindex < 220 ? 1 : 0;
sf->partition_search_breakout_dist_thr = (1 << 25);
@@ -114,12 +114,12 @@
// Also if the image edge is internal to the coded area.
if ((speed >= 1) && (cpi->oxcf.pass == 2) &&
((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
- (vp10_internal_image_edge(cpi)))) {
+ (av1_internal_image_edge(cpi)))) {
sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
}
if (speed >= 4) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->partition_search_breakout_dist_thr = (1 << 26);
} else {
sf->partition_search_breakout_dist_thr = (1 << 24);
@@ -128,13 +128,13 @@
}
}
-static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
+static void set_good_speed_feature(AV1_COMP *cpi, AV1_COMMON *cm,
SPEED_FEATURES *sf, int speed) {
const int boosted = frame_is_boosted(cpi);
if (speed >= 1) {
if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
- vp10_internal_image_edge(cpi)) {
+ av1_internal_image_edge(cpi)) {
sf->use_square_partition_only = !frame_is_boosted(cpi);
} else {
sf->use_square_partition_only = !frame_is_intra_only(cm);
@@ -237,12 +237,12 @@
}
}
-static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_rt_speed_feature_framesize_dependent(AV1_COMP *cpi,
SPEED_FEATURES *sf,
int speed) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
if (speed >= 1) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->disable_split_mask =
cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
} else {
@@ -251,7 +251,7 @@
}
if (speed >= 2) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->disable_split_mask =
cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
} else {
@@ -260,7 +260,7 @@
}
if (speed >= 5) {
- if (VPXMIN(cm->width, cm->height) >= 720) {
+ if (AOMMIN(cm->width, cm->height) >= 720) {
sf->partition_search_breakout_dist_thr = (1 << 25);
} else {
sf->partition_search_breakout_dist_thr = (1 << 23);
@@ -269,13 +269,13 @@
if (speed >= 7) {
sf->encode_breakout_thresh =
- (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300;
+ (AOMMIN(cm->width, cm->height) >= 720) ? 800 : 300;
}
}
-static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
- vpx_tune_content content) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_rt_speed_feature(AV1_COMP *cpi, SPEED_FEATURES *sf, int speed,
+ aom_tune_content content) {
+ AV1_COMMON *const cm = &cpi->common;
const int is_keyframe = cm->frame_type == KEY_FRAME;
const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
sf->static_segmentation = 0;
@@ -401,7 +401,7 @@
if (!is_keyframe) {
int i;
- if (content == VPX_CONTENT_SCREEN) {
+ if (content == AOM_CONTENT_SCREEN) {
for (i = 0; i < BLOCK_SIZES; ++i)
sf->intra_y_mode_bsize_mask[i] = INTRA_DC_TM_H_V;
} else {
@@ -435,9 +435,9 @@
}
}
-void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_dependent(AV1_COMP *cpi) {
SPEED_FEATURES *const sf = &cpi->sf;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
RD_OPT *const rd = &cpi->rd;
int i;
@@ -464,11 +464,11 @@
}
}
-void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_independent(AV1_COMP *cpi) {
SPEED_FEATURES *const sf = &cpi->sf;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
int i;
// best quality defaults
@@ -567,8 +567,8 @@
sf->partition_search_breakout_dist_thr <<= 2 * (MAX_SB_SIZE_LOG2 - 6);
}
- cpi->full_search_sad = vp10_full_search_sad;
- cpi->diamond_search_sad = vp10_diamond_search_sad;
+ cpi->full_search_sad = av1_full_search_sad;
+ cpi->diamond_search_sad = av1_diamond_search_sad;
sf->allow_exhaustive_searches = 1;
if (oxcf->mode == BEST) {
@@ -609,14 +609,13 @@
}
if (sf->mv.subpel_search_method == SUBPEL_TREE) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_more;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
- cpi->find_fractional_mv_step =
- vp10_find_best_sub_pixel_tree_pruned_evenmore;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_evenmore;
}
#if !CONFIG_AOM_QM
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index 2457c5b..18cb380 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_SPEED_FEATURES_H_
-#define VP10_ENCODER_SPEED_FEATURES_H_
+#ifndef AV1_ENCODER_SPEED_FEATURES_H_
+#define AV1_ENCODER_SPEED_FEATURES_H_
#include "av1/common/enums.h"
@@ -471,7 +471,7 @@
// Allow skipping partition search for still image frame
int allow_partition_search_skip;
- // Fast approximation of vp10_model_rd_from_var_lapndz
+ // Fast approximation of av1_model_rd_from_var_lapndz
int simple_model_rd_from_var;
// Do sub-pixel search in up-sampled reference frames
@@ -482,13 +482,13 @@
int use_transform_domain_distortion;
} SPEED_FEATURES;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_set_speed_features_framesize_independent(struct VP10_COMP *cpi);
-void vp10_set_speed_features_framesize_dependent(struct VP10_COMP *cpi);
+void av1_set_speed_features_framesize_independent(struct AV1_COMP *cpi);
+void av1_set_speed_features_framesize_dependent(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SPEED_FEATURES_H_
+#endif // AV1_ENCODER_SPEED_FEATURES_H_
diff --git a/av1/encoder/subexp.c b/av1/encoder/subexp.c
index d722654..dd6c250 100644
--- a/av1/encoder/subexp.c
+++ b/av1/encoder/subexp.c
@@ -14,7 +14,7 @@
#include "av1/encoder/cost.h"
#include "av1/encoder/subexp.h"
-#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+#define av1_cost_upd256 ((int)(av1_cost_one(upd) - av1_cost_zero(upd)))
static const uint8_t update_bits[255] = {
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
@@ -76,54 +76,54 @@
return i;
}
-static int prob_diff_update_cost(vpx_prob newp, vpx_prob oldp) {
+static int prob_diff_update_cost(aom_prob newp, aom_prob oldp) {
int delp = remap_prob(newp, oldp);
- return update_bits[delp] << VP10_PROB_COST_SHIFT;
+ return update_bits[delp] << AV1_PROB_COST_SHIFT;
}
-static void encode_uniform(vp10_writer *w, int v) {
+static void encode_uniform(aom_writer *w, int v) {
const int l = 8;
const int m = (1 << l) - 190;
if (v < m) {
- vp10_write_literal(w, v, l - 1);
+ aom_write_literal(w, v, l - 1);
} else {
- vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
- vp10_write_literal(w, (v - m) & 1, 1);
+ aom_write_literal(w, m + ((v - m) >> 1), l - 1);
+ aom_write_literal(w, (v - m) & 1, 1);
}
}
-static INLINE int write_bit_gte(vp10_writer *w, int word, int test) {
- vp10_write_literal(w, word >= test, 1);
+static INLINE int write_bit_gte(aom_writer *w, int word, int test) {
+ aom_write_literal(w, word >= test, 1);
return word >= test;
}
-static void encode_term_subexp(vp10_writer *w, int word) {
+static void encode_term_subexp(aom_writer *w, int word) {
if (!write_bit_gte(w, word, 16)) {
- vp10_write_literal(w, word, 4);
+ aom_write_literal(w, word, 4);
} else if (!write_bit_gte(w, word, 32)) {
- vp10_write_literal(w, word - 16, 4);
+ aom_write_literal(w, word - 16, 4);
} else if (!write_bit_gte(w, word, 64)) {
- vp10_write_literal(w, word - 32, 5);
+ aom_write_literal(w, word - 32, 5);
} else {
encode_uniform(w, word - 64);
}
}
-void vp10_write_prob_diff_update(vp10_writer *w, vpx_prob newp, vpx_prob oldp) {
+void av1_write_prob_diff_update(aom_writer *w, aom_prob newp, aom_prob oldp) {
const int delp = remap_prob(newp, oldp);
encode_term_subexp(w, delp);
}
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
- vpx_prob *bestp, vpx_prob upd) {
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+ aom_prob *bestp, aom_prob upd) {
const int old_b = cost_branch256(ct, oldp);
int bestsavings = 0;
- vpx_prob newp, bestnewp = oldp;
+ aom_prob newp, bestnewp = oldp;
const int step = *bestp > oldp ? -1 : 1;
for (newp = *bestp; newp != oldp; newp += step) {
const int new_b = cost_branch256(ct, newp);
- const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+ const int update_b = prob_diff_update_cost(newp, oldp) + av1_cost_upd256;
const int savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -134,17 +134,17 @@
return bestsavings;
}
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
- const vpx_prob *oldp,
- vpx_prob *bestp, vpx_prob upd,
- int stepsize) {
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
+ const aom_prob *oldp,
+ aom_prob *bestp, aom_prob upd,
+ int stepsize) {
int i, old_b, new_b, update_b, savings, bestsavings;
int newp;
const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1;
const int step = stepsize * step_sign;
- vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
- vp10_model_to_full_probs(oldp, oldplist);
- memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+ aom_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+ av1_model_to_full_probs(oldp, oldplist);
+ memcpy(newplist, oldp, sizeof(aom_prob) * UNCONSTRAINED_NODES);
for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
old_b += cost_branch256(ct + 2 * i, oldplist[i]);
old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
@@ -157,11 +157,11 @@
for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; newp += step) {
if (newp < 1 || newp > 255) continue;
newplist[PIVOT_NODE] = newp;
- vp10_model_to_full_probs(newplist, newplist);
+ av1_model_to_full_probs(newplist, newplist);
for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
new_b += cost_branch256(ct + 2 * i, newplist[i]);
new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
- update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+ update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -174,7 +174,7 @@
}
#if CONFIG_ENTROPY
-static int get_cost(unsigned int ct[][2], vpx_prob p, int n) {
+static int get_cost(unsigned int ct[][2], aom_prob p, int n) {
int i, p0 = p;
unsigned int total_ct[2] = { 0, 0 };
int cost = 0;
@@ -184,22 +184,22 @@
total_ct[0] += ct[i][0];
total_ct[1] += ct[i][1];
if (i < n)
- p = vp10_merge_probs(p0, total_ct, COEF_COUNT_SAT_BITS,
- COEF_MAX_UPDATE_FACTOR_BITS);
+ p = av1_merge_probs(p0, total_ct, COEF_COUNT_SAT_BITS,
+ COEF_MAX_UPDATE_FACTOR_BITS);
}
return cost;
}
-int vp10_prob_update_search_subframe(unsigned int ct[][2], vpx_prob oldp,
- vpx_prob *bestp, vpx_prob upd, int n) {
+int av1_prob_update_search_subframe(unsigned int ct[][2], aom_prob oldp,
+ aom_prob *bestp, aom_prob upd, int n) {
const int old_b = get_cost(ct, oldp, n);
int bestsavings = 0;
- vpx_prob newp, bestnewp = oldp;
+ aom_prob newp, bestnewp = oldp;
const int step = *bestp > oldp ? -1 : 1;
for (newp = *bestp; newp != oldp; newp += step) {
const int new_b = get_cost(ct, newp, n);
- const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+ const int update_b = prob_diff_update_cost(newp, oldp) + av1_cost_upd256;
const int savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -210,16 +210,16 @@
return bestsavings;
}
-int vp10_prob_update_search_model_subframe(
- unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const vpx_prob *oldp,
- vpx_prob *bestp, vpx_prob upd, int stepsize, int n) {
+int av1_prob_update_search_model_subframe(
+ unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const aom_prob *oldp,
+ aom_prob *bestp, aom_prob upd, int stepsize, int n) {
int i, old_b, new_b, update_b, savings, bestsavings;
int newp;
const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1;
const int step = stepsize * step_sign;
- vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
- vp10_model_to_full_probs(oldp, oldplist);
- memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+ aom_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+ av1_model_to_full_probs(oldp, oldplist);
+ memcpy(newplist, oldp, sizeof(aom_prob) * UNCONSTRAINED_NODES);
for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
old_b += get_cost(ct[i], oldplist[i], n);
old_b += get_cost(ct[PIVOT_NODE], oldplist[PIVOT_NODE], n);
@@ -232,11 +232,11 @@
for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; newp += step) {
if (newp < 1 || newp > 255) continue;
newplist[PIVOT_NODE] = newp;
- vp10_model_to_full_probs(newplist, newplist);
+ av1_model_to_full_probs(newplist, newplist);
for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
new_b += get_cost(ct[i], newplist[i], n);
new_b += get_cost(ct[PIVOT_NODE], newplist[PIVOT_NODE], n);
- update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+ update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -249,40 +249,40 @@
}
#endif // CONFIG_ENTROPY
-void vp10_cond_prob_diff_update(vp10_writer *w, vpx_prob *oldp,
- const unsigned int ct[2]) {
- const vpx_prob upd = DIFF_UPDATE_PROB;
- vpx_prob newp = get_binary_prob(ct[0], ct[1]);
+void av1_cond_prob_diff_update(aom_writer *w, aom_prob *oldp,
+ const unsigned int ct[2]) {
+ const aom_prob upd = DIFF_UPDATE_PROB;
+ aom_prob newp = get_binary_prob(ct[0], ct[1]);
const int savings =
- vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+ av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
assert(newp >= 1);
if (savings > 0) {
- vp10_write(w, 1, upd);
- vp10_write_prob_diff_update(w, newp, *oldp);
+ aom_write(w, 1, upd);
+ av1_write_prob_diff_update(w, newp, *oldp);
*oldp = newp;
} else {
- vp10_write(w, 0, upd);
+ aom_write(w, 0, upd);
}
}
-int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
- const unsigned int ct[2]) {
- const vpx_prob upd = DIFF_UPDATE_PROB;
- vpx_prob newp = get_binary_prob(ct[0], ct[1]);
+int av1_cond_prob_diff_update_savings(aom_prob *oldp,
+ const unsigned int ct[2]) {
+ const aom_prob upd = DIFF_UPDATE_PROB;
+ aom_prob newp = get_binary_prob(ct[0], ct[1]);
const int savings =
- vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+ av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
return savings;
}
-void vp10_write_primitive_symmetric(vp10_writer *w, int word,
- unsigned int abs_bits) {
+void aom_write_primitive_symmetric(aom_writer *w, int word,
+ unsigned int abs_bits) {
if (word == 0) {
- vp10_write_bit(w, 0);
+ aom_write_bit(w, 0);
} else {
const int x = abs(word);
const int s = word < 0;
- vp10_write_bit(w, 1);
- vp10_write_bit(w, s);
- vp10_write_literal(w, x - 1, abs_bits);
+ aom_write_bit(w, 1);
+ aom_write_bit(w, s);
+ aom_write_literal(w, x - 1, abs_bits);
}
}
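
The bit accounting behind update_bits[] and prob_diff_update_cost() near the top of this file can be cross-checked with a standalone sketch (not part of the patch); the first two bands, 5 and 6 bits, match the visible start of the update_bits table:

/* Raw bits written by encode_term_subexp()/encode_uniform() for a
 * remapped delta "word":
 *   word < 16 : 1 flag bit  + 4-bit literal             = 5
 *   word < 32 : 2 flag bits + 4-bit literal (word - 16) = 6
 *   word < 64 : 3 flag bits + 5-bit literal (word - 32) = 8
 *   else      : 3 flag bits + encode_uniform(word - 64)
 *               with l = 8, m = 256 - 190 = 66: 7 or 8 more bits. */
static int term_subexp_bits(int word) {
  if (word < 16) return 5;
  if (word < 32) return 6;
  if (word < 64) return 8;
  return (word - 64 < 66) ? 10 : 11;
}
/* The coded cost also adds the update flag; see av1_cost_upd256 and the
 * AV1_PROB_COST_SHIFT scaling in prob_diff_update_cost(). */
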
diff --git a/av1/encoder/subexp.h b/av1/encoder/subexp.h
index 82ce2e0..c829f2d 100644
--- a/av1/encoder/subexp.h
+++ b/av1/encoder/subexp.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_SUBEXP_H_
-#define VP10_ENCODER_SUBEXP_H_
+#ifndef AV1_ENCODER_SUBEXP_H_
+#define AV1_ENCODER_SUBEXP_H_
#ifdef __cplusplus
extern "C" {
@@ -17,30 +17,29 @@
#include "aom_dsp/prob.h"
-struct vp10_writer;
+struct aom_writer;
-void vp10_write_prob_diff_update(struct vp10_writer *w, vpx_prob newp,
- vpx_prob oldp);
+void av1_write_prob_diff_update(struct aom_writer *w, aom_prob newp,
+ aom_prob oldp);
-void vp10_cond_prob_diff_update(struct vp10_writer *w, vpx_prob *oldp,
- const unsigned int ct[2]);
+void av1_cond_prob_diff_update(struct aom_writer *w, aom_prob *oldp,
+ const unsigned int ct[2]);
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
- vpx_prob *bestp, vpx_prob upd);
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+ aom_prob *bestp, aom_prob upd);
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
- const vpx_prob *oldp,
- vpx_prob *bestp, vpx_prob upd,
- int stepsize);
-int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
- const unsigned int ct[2]);
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
+ const aom_prob *oldp,
+ aom_prob *bestp, aom_prob upd,
+ int stepsize);
+int av1_cond_prob_diff_update_savings(aom_prob *oldp, const unsigned int ct[2]);
#if CONFIG_ENTROPY
-int vp10_prob_update_search_subframe(unsigned int ct[][2], vpx_prob oldp,
- vpx_prob *bestp, vpx_prob upd, int n);
-int vp10_prob_update_search_model_subframe(
- unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const vpx_prob *oldp,
- vpx_prob *bestp, vpx_prob upd, int stepsize, int n);
+int av1_prob_update_search_subframe(unsigned int ct[][2], aom_prob oldp,
+ aom_prob *bestp, aom_prob upd, int n);
+int av1_prob_update_search_model_subframe(
+ unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const aom_prob *oldp,
+ aom_prob *bestp, aom_prob upd, int stepsize, int n);
#endif // CONFIG_ENTROPY
//
@@ -48,10 +47,10 @@
// 2 * 2^mag_bits + 1, symmetric around 0, where one bit is used to
// indicate 0 or non-zero, mag_bits bits are used to indicate the magnitude
// and 1 more bit for the sign if non-zero.
-void vp10_write_primitive_symmetric(vp10_writer *w, int word,
- unsigned int mag_bits);
+void aom_write_primitive_symmetric(aom_writer *w, int word,
+ unsigned int mag_bits);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SUBEXP_H_
+#endif // AV1_ENCODER_SUBEXP_H_
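
The alphabet comment above aom_write_primitive_symmetric() implies a fixed bit budget, sketched standalone here (not part of the patch):

/* Bit layout for word in [-2^mag_bits, 2^mag_bits]:
 *   word == 0 : a single 0 bit
 *   word != 0 : a 1 bit, a sign bit, then |word| - 1 in mag_bits bits
 * e.g. mag_bits == 4: word 0 costs 1 bit; word +5 or -5 costs 6 bits. */
static int primitive_symmetric_bits(int word, unsigned mag_bits) {
  return word == 0 ? 1 : (int)(2 + mag_bits);
}
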
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index 32490cc..4a5de37 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -24,11 +24,11 @@
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/segmentation.h"
#include "av1/encoder/temporal_filter.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
static void temporal_filter_predictors_mb_c(
MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
@@ -61,41 +61,41 @@
mv_precision_uv = MV_PRECISION_Q3;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
- scale, 16, 16, which_mv, interp_filter,
- MV_PRECISION_Q3, x, y, xd->bd);
+ av1_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale,
+ 16, 16, which_mv, interp_filter,
+ MV_PRECISION_Q3, x, y, xd->bd);
- vp10_highbd_build_inter_predictor(
- u_mb_ptr, uv_stride, &pred[256], uv_block_width, &mv, scale,
- uv_block_width, uv_block_height, which_mv, interp_filter,
- mv_precision_uv, x, y, xd->bd);
+ av1_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+ uv_block_width, &mv, scale, uv_block_width,
+ uv_block_height, which_mv, interp_filter,
+ mv_precision_uv, x, y, xd->bd);
- vp10_highbd_build_inter_predictor(
- v_mb_ptr, uv_stride, &pred[512], uv_block_width, &mv, scale,
- uv_block_width, uv_block_height, which_mv, interp_filter,
- mv_precision_uv, x, y, xd->bd);
+ av1_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+ uv_block_width, &mv, scale, uv_block_width,
+ uv_block_height, which_mv, interp_filter,
+ mv_precision_uv, x, y, xd->bd);
return;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
- which_mv, interp_filter, MV_PRECISION_Q3, x, y);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ av1_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+ which_mv, interp_filter, MV_PRECISION_Q3, x, y);
- vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
- &mv, scale, uv_block_width, uv_block_height,
- which_mv, interp_filter, mv_precision_uv, x, y);
+ av1_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+ &mv, scale, uv_block_width, uv_block_height,
+ which_mv, interp_filter, mv_precision_uv, x, y);
- vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
- &mv, scale, uv_block_width, uv_block_height,
- which_mv, interp_filter, mv_precision_uv, x, y);
+ av1_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+ &mv, scale, uv_block_width, uv_block_height,
+ which_mv, interp_filter, mv_precision_uv, x, y);
}
-void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
- uint8_t *frame2, unsigned int block_width,
- unsigned int block_height, int strength,
- int filter_weight, unsigned int *accumulator,
- uint16_t *count) {
+void av1_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+ uint8_t *frame2, unsigned int block_width,
+ unsigned int block_height, int strength,
+ int filter_weight, unsigned int *accumulator,
+ uint16_t *count) {
unsigned int i, j, k;
int modifier;
int byte = 0;
@@ -152,8 +152,8 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_temporal_filter_apply_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_temporal_filter_apply_c(
uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
unsigned int block_width, unsigned int block_height, int strength,
int filter_weight, unsigned int *accumulator, uint16_t *count) {
@@ -214,9 +214,9 @@
byte += stride - block_width;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
+static int temporal_filter_find_matching_mb_c(AV1_COMP *cpi,
uint8_t *arf_frame_buf,
uint8_t *frame_ptr_buf,
int stride) {
@@ -247,7 +247,7 @@
xd->plane[0].pre[0].stride = stride;
step_param = mv_sf->reduce_first_step_size;
- step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
+ step_param = AOMMIN(step_param, MAX_MVSEARCH_STEPS - 2);
#if CONFIG_REF_MV
x->mvcost = x->mv_cost_stack[0];
@@ -257,9 +257,9 @@
#endif
// Ignore mv costing by sending NULL pointer instead of cost arrays
- vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
- cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
- &best_ref_mv1);
+ av1_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
+ cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
+ &best_ref_mv1);
// Ignore mv costing by sending NULL pointer instead of cost array
bestsme = cpi->find_fractional_mv_step(
@@ -277,7 +277,7 @@
return bestsme;
}
-static void temporal_filter_iterate_c(VP10_COMP *cpi,
+static void temporal_filter_iterate_c(AV1_COMP *cpi,
YV12_BUFFER_CONFIG **frames,
int frame_count, int alt_ref_index,
int strength,
@@ -295,7 +295,7 @@
MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
uint8_t *dst1, *dst2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
uint8_t *predictor;
@@ -308,7 +308,7 @@
// Save input state
uint8_t *input_buffer[MAX_MB_PLANE];
int i;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
predictor = CONVERT_TO_BYTEPTR(predictor16);
} else {
@@ -320,19 +320,19 @@
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
// Source frames are extended to 16 pixels. This is different than
- // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS)
+ // L/A/G reference frames that have a border of 32 (AV1ENCBORDERINPIXELS)
// A 6/8 tap filter is used for motion search. This requires 2 pixels
// before and 3 pixels after. So the largest Y mv on a border would
- // then be 16 - VPX_INTERP_EXTEND. The UV blocks are half the size of the
+ // then be 16 - AOM_INTERP_EXTEND. The UV blocks are half the size of the
// Y and therefore only extended by 8. The largest mv that a UV block
- // can support is 8 - VPX_INTERP_EXTEND. A UV mv is half of a Y mv.
- // (16 - VPX_INTERP_EXTEND) >> 1 which is greater than
- // 8 - VPX_INTERP_EXTEND.
+ // can support is 8 - AOM_INTERP_EXTEND. A UV mv is half of a Y mv.
+ // (16 - AOM_INTERP_EXTEND) >> 1 which is greater than
+ // 8 - AOM_INTERP_EXTEND.
// To keep the mv in play for both Y and UV planes the max that it
- // can be on a border is therefore 16 - (2*VPX_INTERP_EXTEND+1).
- cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VPX_INTERP_EXTEND));
+ // can be on a border is therefore 16 - (2*AOM_INTERP_EXTEND+1).
+ cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * AOM_INTERP_EXTEND));
cpi->td.mb.mv_row_max =
- ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
+ ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * AOM_INTERP_EXTEND);
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
int i, j, k;
@@ -341,9 +341,9 @@
memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
- cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VPX_INTERP_EXTEND));
+ cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * AOM_INTERP_EXTEND));
cpi->td.mb.mv_col_max =
- ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
+ ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * AOM_INTERP_EXTEND);
for (frame = 0; frame < frame_count; frame++) {
const int thresh_low = 10000;
@@ -378,53 +378,53 @@
mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
mb_col * 16, mb_row * 16);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int adj_strength = strength + 2 * (mbd->bd - 8);
// Apply the filter (YUV)
- vp10_highbd_temporal_filter_apply(
+ av1_highbd_temporal_filter_apply(
f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
adj_strength, filter_weight, accumulator, count);
- vp10_highbd_temporal_filter_apply(
+ av1_highbd_temporal_filter_apply(
f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
mb_uv_width, mb_uv_height, adj_strength, filter_weight,
accumulator + 256, count + 256);
- vp10_highbd_temporal_filter_apply(
+ av1_highbd_temporal_filter_apply(
f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
mb_uv_width, mb_uv_height, adj_strength, filter_weight,
accumulator + 512, count + 512);
} else {
// Apply the filter (YUV)
- vp10_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
- predictor, 16, 16, strength,
- filter_weight, accumulator, count);
- vp10_temporal_filter_apply_c(
+ av1_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
+ predictor, 16, 16, strength,
+ filter_weight, accumulator, count);
+ av1_temporal_filter_apply_c(
f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
mb_uv_width, mb_uv_height, strength, filter_weight,
accumulator + 256, count + 256);
- vp10_temporal_filter_apply_c(
+ av1_temporal_filter_apply_c(
f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
mb_uv_width, mb_uv_height, strength, filter_weight,
accumulator + 512, count + 512);
}
#else
// Apply the filter (YUV)
- vp10_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
- predictor, 16, 16, strength,
- filter_weight, accumulator, count);
- vp10_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride,
- predictor + 256, mb_uv_width,
- mb_uv_height, strength, filter_weight,
- accumulator + 256, count + 256);
- vp10_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride,
- predictor + 512, mb_uv_width,
- mb_uv_height, strength, filter_weight,
- accumulator + 512, count + 512);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ av1_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
+ predictor, 16, 16, strength,
+ filter_weight, accumulator, count);
+ av1_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 256, mb_uv_width,
+ mb_uv_height, strength, filter_weight,
+ accumulator + 256, count + 256);
+ av1_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 512, mb_uv_width,
+ mb_uv_height, strength, filter_weight,
+ accumulator + 512, count + 512);
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *dst1_16;
uint16_t *dst2_16;
@@ -544,7 +544,7 @@
}
byte += stride - mb_uv_width;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
mb_y_offset += 16;
mb_uv_offset += mb_uv_width;
}
@@ -557,11 +557,11 @@
}
// Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
+static void adjust_arnr_filter(AV1_COMP *cpi, int distance, int group_boost,
int *arnr_frames, int *arnr_strength) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const int frames_after_arf =
- vp10_lookahead_depth(cpi->lookahead) - distance - 1;
+ av1_lookahead_depth(cpi->lookahead) - distance - 1;
int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
int frames_bwd;
int q, frames, strength;
@@ -581,11 +581,11 @@
// Adjust the strength based on active max q.
if (cpi->common.current_video_frame > 1)
- q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
- cpi->common.bit_depth));
+ q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+ cpi->common.bit_depth));
else
- q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
- cpi->common.bit_depth));
+ q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+ cpi->common.bit_depth));
if (q > 16) {
strength = oxcf->arnr_strength;
} else {
@@ -615,7 +615,7 @@
*arnr_strength = strength;
}
-void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
+void av1_temporal_filter(AV1_COMP *cpi, int distance) {
RATE_CONTROL *const rc = &cpi->rc;
int frame;
int frames_to_blur;
@@ -658,7 +658,7 @@
for (frame = 0; frame < frames_to_blur; ++frame) {
const int which_buffer = start_frame - frame;
struct lookahead_entry *buf =
- vp10_lookahead_peek(cpi->lookahead, which_buffer);
+ av1_lookahead_peek(cpi->lookahead, which_buffer);
frames[frames_to_blur - 1 - frame] = &buf->img;
}
@@ -666,16 +666,16 @@
// Setup scaling factors. Scaling on each of the arnr frames is not
// supported.
// ARF is produced at the native frame size and resized when coded.
-#if CONFIG_VP9_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
frames[0]->y_crop_width, frames[0]->y_crop_height,
cpi->common.use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(
+ av1_setup_scale_factors_for_frame(
&sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
frames[0]->y_crop_width, frames[0]->y_crop_height);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
temporal_filter_iterate_c(cpi, frames, frames_to_blur,
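
The border comment inside temporal_filter_iterate_c() packs several constants into one expression; a standalone sketch of the arithmetic (not part of the patch; assumes AOM_INTERP_EXTEND is 4, its value elsewhere in the tree):

/* Motion-vector row limits for the 16x16 MB at row mb_row of mb_rows.
 * The border allowance is 17 - 2 * AOM_INTERP_EXTEND = 9 (assumed), so
 * e.g. mb_row == 0, mb_rows == 45 (720p): mv_min == -9, mv_max == 713. */
static void mb_row_mv_limits(int mb_row, int mb_rows, int *mv_min,
                             int *mv_max) {
  const int interp_extend = 4; /* assumption: AOM_INTERP_EXTEND */
  const int border = 17 - 2 * interp_extend;
  *mv_min = -((mb_row * 16) + border);
  *mv_max = ((mb_rows - 1 - mb_row) * 16) + border;
}
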
diff --git a/av1/encoder/temporal_filter.h b/av1/encoder/temporal_filter.h
index ce5291a..ef21215 100644
--- a/av1/encoder/temporal_filter.h
+++ b/av1/encoder/temporal_filter.h
@@ -8,17 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_TEMPORAL_FILTER_H_
-#define VP10_ENCODER_TEMPORAL_FILTER_H_
+#ifndef AV1_ENCODER_TEMPORAL_FILTER_H_
+#define AV1_ENCODER_TEMPORAL_FILTER_H_
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_temporal_filter(VP10_COMP *cpi, int distance);
+void av1_temporal_filter(AV1_COMP *cpi, int distance);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_TEMPORAL_FILTER_H_
+#endif // AV1_ENCODER_TEMPORAL_FILTER_H_
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index c841fa6..6a5dc21 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -13,7 +13,7 @@
#include <stdio.h>
#include <string.h>
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/entropy.h"
#include "av1/common/pred_common.h"
@@ -45,14 +45,14 @@
{ 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
{ 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
};
-const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+const TOKENVALUE *av1_dct_cat_lt_10_value_tokens =
dct_cat_lt_10_value_tokens +
(sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
2;
// The corresponding costs of the extrabits for the tokens in the above table
// are stored in the table below. The values are obtained from looking up the
// entry for the specified extrabits in the table corresponding to the token
-// (as defined in cost element vp10_extra_bits)
+// (as defined in the cost element of av1_extra_bits)
// e.g. {9, 63} maps to cat5_cost[63 >> 1], {1, 1} maps to sign_cost[1 >> 1]
static const int dct_cat_lt_10_value_cost[] = {
3773, 3750, 3704, 3681, 3623, 3600, 3554, 3531, 3432, 3409, 3363, 3340, 3282,
@@ -67,13 +67,13 @@
3190, 3213, 3259, 3282, 3340, 3363, 3409, 3432, 3531, 3554, 3600, 3623, 3681,
3704, 3750, 3773,
};
-const int *vp10_dct_cat_lt_10_value_cost =
+const int *av1_dct_cat_lt_10_value_cost =
dct_cat_lt_10_value_cost +
(sizeof(dct_cat_lt_10_value_cost) / sizeof(*dct_cat_lt_10_value_cost)) / 2;
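
Both pointers above are deliberately offset to the middle of their tables so that a (possibly negative) coefficient value can index them directly; a standalone sketch of the idiom (not part of the patch):

#include <stdio.h>

/* Pointing at the middle of a symmetric table lets table_mid[v] work for
 * negative v with no abs() or branch, as av1_dct_cat_lt_10_value_tokens
 * and av1_dct_cat_lt_10_value_cost do above. */
static const int table[] = { -4, -3, -2, -1, 0, 1, 2, 3, 4 };
static const int *table_mid = table + (sizeof(table) / sizeof(*table)) / 2;

int main(void) {
  int v;
  for (v = -4; v <= 4; ++v)
    printf("table_mid[%d] = %d\n", v, table_mid[v]); /* identity here */
  return 0;
}
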
// Array indices are identical to previously-existing CONTEXT_NODE indices
/* clang-format off */
-const vpx_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
-EOB_TOKEN, 2, // 0 = EOB
-ZERO_TOKEN, 4, // 1 = ZERO
-ONE_TOKEN, 6, // 2 = ONE
@@ -88,12 +88,12 @@
};
/* clang-format on */
-static const vpx_tree_index cat1[2] = { 0, 0 };
-static const vpx_tree_index cat2[4] = { 2, 2, 0, 0 };
-static const vpx_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
-static const vpx_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
-static const vpx_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
-static const vpx_tree_index cat6[28] = { 2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
+static const aom_tree_index cat1[2] = { 0, 0 };
+static const aom_tree_index cat2[4] = { 2, 2, 0, 0 };
+static const aom_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
+static const aom_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const aom_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const aom_tree_index cat6[28] = { 2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
12, 12, 14, 14, 16, 16, 18, 18, 20, 20,
22, 22, 24, 24, 26, 26, 0, 0 };
@@ -111,7 +111,7 @@
2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773
};
-const int16_t vp10_cat6_low_cost[256] = {
+const int16_t av1_cat6_low_cost[256] = {
3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574,
3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822,
3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053,
@@ -133,7 +133,7 @@
6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831,
6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982
};
-const int vp10_cat6_high_cost[64] = {
+const int av1_cat6_high_cost[64] = {
88, 2251, 2727, 4890, 3148, 5311, 5787, 7950, 3666, 5829, 6305,
8468, 6726, 8889, 9365, 11528, 3666, 5829, 6305, 8468, 6726, 8889,
9365, 11528, 7244, 9407, 9883, 12046, 10304, 12467, 12943, 15106, 3666,
@@ -142,8 +142,8 @@
15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684
};
-#if CONFIG_VP9_HIGHBITDEPTH
-const int vp10_cat6_high10_high_cost[256] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const int av1_cat6_high10_high_cost[256] = {
94, 2257, 2733, 4896, 3154, 5317, 5793, 7956, 3672, 5835, 6311,
8474, 6732, 8895, 9371, 11534, 3672, 5835, 6311, 8474, 6732, 8895,
9371, 11534, 7250, 9413, 9889, 12052, 10310, 12473, 12949, 15112, 3672,
@@ -169,7 +169,7 @@
18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
24237, 24713, 26876
};
-const int vp10_cat6_high12_high_cost[1024] = {
+const int av1_cat6_high12_high_cost[1024] = {
100, 2263, 2739, 4902, 3160, 5323, 5799, 7962, 3678, 5841, 6317,
8480, 6738, 8901, 9377, 11540, 3678, 5841, 6317, 8480, 6738, 8901,
9377, 11540, 7256, 9419, 9895, 12058, 10316, 12479, 12955, 15118, 3678,
@@ -267,82 +267,82 @@
};
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
-static const vpx_tree_index cat1_high10[2] = { 0, 0 };
-static const vpx_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
-static const vpx_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
-static const vpx_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
-static const vpx_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
-static const vpx_tree_index cat6_high10[32] = { 2, 2, 4, 4, 6, 6, 8, 8,
+#if CONFIG_AOM_HIGHBITDEPTH
+static const aom_tree_index cat1_high10[2] = { 0, 0 };
+static const aom_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
+static const aom_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
+static const aom_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const aom_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const aom_tree_index cat6_high10[32] = { 2, 2, 4, 4, 6, 6, 8, 8,
10, 10, 12, 12, 14, 14, 16, 16,
18, 18, 20, 20, 22, 22, 24, 24,
26, 26, 28, 28, 30, 30, 0, 0 };
-static const vpx_tree_index cat1_high12[2] = { 0, 0 };
-static const vpx_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
-static const vpx_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
-static const vpx_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
-static const vpx_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
-static const vpx_tree_index cat6_high12[36] = {
+static const aom_tree_index cat1_high12[2] = { 0, 0 };
+static const aom_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
+static const aom_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
+static const aom_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const aom_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const aom_tree_index cat6_high12[36] = {
2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14, 16, 16, 18, 18,
20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, 30, 32, 32, 34, 34, 0, 0
};
#endif
-const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
- { 0, 0, 0, 0, zero_cost }, // ZERO_TOKEN
- { 0, 0, 0, 1, sign_cost }, // ONE_TOKEN
- { 0, 0, 0, 2, sign_cost }, // TWO_TOKEN
- { 0, 0, 0, 3, sign_cost }, // THREE_TOKEN
- { 0, 0, 0, 4, sign_cost }, // FOUR_TOKEN
- { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN
- { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN
- { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN
- { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN
- { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN
- { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN
- { 0, 0, 0, 0, zero_cost } // EOB_TOKEN
+const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS] = {
+ { 0, 0, 0, 0, zero_cost }, // ZERO_TOKEN
+ { 0, 0, 0, 1, sign_cost }, // ONE_TOKEN
+ { 0, 0, 0, 2, sign_cost }, // TWO_TOKEN
+ { 0, 0, 0, 3, sign_cost }, // THREE_TOKEN
+ { 0, 0, 0, 4, sign_cost }, // FOUR_TOKEN
+ { cat1, av1_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN
+ { cat2, av1_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN
+ { cat3, av1_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN
+ { cat4, av1_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN
+ { cat5, av1_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN
+ { cat6, av1_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN
+ { 0, 0, 0, 0, zero_cost } // EOB_TOKEN
};
-#if CONFIG_VP9_HIGHBITDEPTH
-const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
- { 0, 0, 0, 0, zero_cost }, // ZERO
- { 0, 0, 0, 1, sign_cost }, // ONE
- { 0, 0, 0, 2, sign_cost }, // TWO
- { 0, 0, 0, 3, sign_cost }, // THREE
- { 0, 0, 0, 4, sign_cost }, // FOUR
- { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
- { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
- { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
- { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
- { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
- { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 }, // CAT6
- { 0, 0, 0, 0, zero_cost } // EOB
+#if CONFIG_AOM_HIGHBITDEPTH
+const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS] = {
+ { 0, 0, 0, 0, zero_cost }, // ZERO
+ { 0, 0, 0, 1, sign_cost }, // ONE
+ { 0, 0, 0, 2, sign_cost }, // TWO
+ { 0, 0, 0, 3, sign_cost }, // THREE
+ { 0, 0, 0, 4, sign_cost }, // FOUR
+ { cat1_high10, av1_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { cat2_high10, av1_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { cat3_high10, av1_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { cat4_high10, av1_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { cat5_high10, av1_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { cat6_high10, av1_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 }, // CAT6
+ { 0, 0, 0, 0, zero_cost } // EOB
};
-const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
- { 0, 0, 0, 0, zero_cost }, // ZERO
- { 0, 0, 0, 1, sign_cost }, // ONE
- { 0, 0, 0, 2, sign_cost }, // TWO
- { 0, 0, 0, 3, sign_cost }, // THREE
- { 0, 0, 0, 4, sign_cost }, // FOUR
- { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
- { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
- { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
- { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
- { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
- { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6
- { 0, 0, 0, 0, zero_cost } // EOB
+const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS] = {
+ { 0, 0, 0, 0, zero_cost }, // ZERO
+ { 0, 0, 0, 1, sign_cost }, // ONE
+ { 0, 0, 0, 2, sign_cost }, // TWO
+ { 0, 0, 0, 3, sign_cost }, // THREE
+ { 0, 0, 0, 4, sign_cost }, // FOUR
+ { cat1_high12, av1_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { cat2_high12, av1_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { cat3_high12, av1_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { cat4_high12, av1_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { cat5_high12, av1_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { cat6_high12, av1_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6
+ { 0, 0, 0, 0, zero_cost } // EOB
};
#endif
#if !CONFIG_ANS
-const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
+const struct av1_token av1_coef_encodings[ENTROPY_TOKENS] = {
{ 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 },
{ 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
};
#endif // !CONFIG_ANS
struct tokenize_b_args {
- VP10_COMP *cpi;
+ AV1_COMP *cpi;
ThreadData *td;
TOKENEXTRA **tp;
};
@@ -356,11 +356,11 @@
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
- vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
- blk_row);
+ av1_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+ blk_row);
}
-static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
+static INLINE void add_token(TOKENEXTRA **t, const aom_prob *context_tree,
#if CONFIG_ANS
const rans_dec_lut *token_cdf,
#endif // CONFIG_ANS
@@ -378,7 +378,7 @@
}
static INLINE void add_token_no_extra(TOKENEXTRA **t,
- const vpx_prob *context_tree,
+ const aom_prob *context_tree,
uint8_t token, uint8_t skip_eob_node,
unsigned int *counts) {
(*t)->token = token;
@@ -394,8 +394,8 @@
return segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
}
-void vp10_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
- int plane, TOKENEXTRA **t) {
+void av1_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
+ int plane, TOKENEXTRA **t) {
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -408,14 +408,14 @@
(xd->plane[plane != 0].subsampling_y);
const int cols = (4 * num_4x4_blocks_wide_lookup[bsize]) >>
(xd->plane[plane != 0].subsampling_x);
- const vpx_prob (*const probs)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
- plane == 0 ? vp10_default_palette_y_color_prob
- : vp10_default_palette_uv_color_prob;
+ const aom_prob (*const probs)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+ plane == 0 ? av1_default_palette_y_color_prob
+ : av1_default_palette_uv_color_prob;
for (i = 0; i < rows; ++i) {
for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
color_ctx =
- vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
+ av1_get_palette_color_context(color_map, cols, i, j, n, color_order);
for (k = 0; k < n; ++k)
if (color_map[i * cols + j] == color_order[k]) {
color_new_idx = k;
@@ -433,7 +433,7 @@
static void tokenize_b(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct tokenize_b_args *const args = arg;
- VP10_COMP *cpi = args->cpi;
+ AV1_COMP *cpi = args->cpi;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -449,7 +449,7 @@
const PLANE_TYPE type = pd->plane_type;
const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
#if CONFIG_SUPERTX
- const int segment_id = VPXMIN(mbmi->segment_id, mbmi->segment_id_supertx);
+ const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx);
#else
const int segment_id = mbmi->segment_id;
#endif // CONFIG_SUPERTX
@@ -460,11 +460,11 @@
unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
td->rd_counts.coef_counts[txsize_sqr_map[tx_size]][type][ref];
#if CONFIG_ENTROPY
- vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
cpi->subframe_stats.coef_probs_buf[cpi->common.coef_probs_update_idx]
[txsize_sqr_map[tx_size]][type][ref];
#else
- vpx_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ aom_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
cpi->common.fc->coef_probs[txsize_sqr_map[tx_size]][type][ref];
#endif // CONFIG_ENTROPY
#if CONFIG_ANS
@@ -488,7 +488,7 @@
const int v = qcoeff[scan[c]];
eob_branch[band[c]][pt] += !skip_eob;
- vp10_get_token_extra(v, &token, &extra);
+ av1_get_token_extra(v, &token, &extra);
add_token(&t, coef_probs[band[c]][pt],
#if CONFIG_ANS
@@ -496,7 +496,7 @@
#endif // CONFIG_ANS
extra, (uint8_t)token, (uint8_t)skip_eob, counts[band[c]][pt]);
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
++c;
pt = get_coef_context(nb, token_cache, c);
skip_eob = (token == ZERO_TOKEN);
@@ -509,7 +509,7 @@
*tp = t;
- vp10_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
+ av1_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
}
struct is_skippable_args {
@@ -528,12 +528,12 @@
}
// TODO(yaowu): rewrite and optimize this function to remove the usage of
-// vp10_foreach_transform_block() and simplify is_skippable().
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+// av1_foreach_transform_block() and simplify is_skippable().
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 1;
struct is_skippable_args args = { x->plane[plane].eobs, &result };
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
- &args);
+ av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
+ &args);
return result;
}
@@ -550,11 +550,11 @@
*(args->skippable) |= (args->eobs[block] > eobs);
}
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 0;
struct is_skippable_args args = { x->plane[plane].eobs, &result };
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
- has_high_freq_coeff, &args);
+ av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
+ has_high_freq_coeff, &args);
return result;
}
@@ -615,15 +615,15 @@
}
}
-void vp10_tokenize_sb_inter(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- int dry_run, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb_inter(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+ int dry_run, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TOKENEXTRA *t_backup = *t;
- const int ctx = vp10_get_skip_context(xd);
+ const int ctx = av1_get_skip_context(xd);
const int skip_inc =
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
struct tokenize_b_args arg = { cpi, td, t };
@@ -669,13 +669,13 @@
}
#endif // CONFIG_VAR_TX
-void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- int dry_run, BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t, int dry_run,
+ BLOCK_SIZE bsize) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- const int ctx = vp10_get_skip_context(xd);
+ const int ctx = av1_get_skip_context(xd);
const int skip_inc =
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
struct tokenize_b_args arg = { cpi, td, t };
@@ -691,24 +691,24 @@
td->counts->skip[ctx][0] += skip_inc;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
- &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+ &arg);
(*t)->token = EOSB_TOKEN;
(*t)++;
}
} else {
- vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+ av1_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
}
}
#if CONFIG_SUPERTX
-void vp10_tokenize_sb_supertx(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- int dry_run, BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb_supertx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+ int dry_run, BLOCK_SIZE bsize) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &td->mb.e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TOKENEXTRA *t_backup = *t;
- const int ctx = vp10_get_skip_context(xd);
+ const int ctx = av1_get_skip_context(xd);
const int skip_inc =
!segfeature_active(&cm->seg, mbmi->segment_id_supertx, SEG_LVL_SKIP);
struct tokenize_b_args arg = { cpi, td, t };
@@ -724,13 +724,13 @@
td->counts->skip[ctx][0] += skip_inc;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
- &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+ &arg);
(*t)->token = EOSB_TOKEN;
(*t)++;
}
} else {
- vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+ av1_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
*t = t_backup;
}
}
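
Editor's note: the av1_extra_bits tables at the top of this hunk pair each CATn token with a category minimum, a count of extra magnitude bits (1 through 5 for CAT1–CAT5, and 16/18 bits for CAT6 at 10/12-bit depth), and a cost table. A decoded magnitude is simply the category minimum plus the raw extra bits. A minimal sketch of that reconstruction; the CATn_MIN_VAL constants are assumed from the usual VP9/AV1 token scheme and are not shown in this hunk:

```c
#include <stdio.h>

/* Hedged sketch: category minimums assumed from the usual VP9/AV1 token
 * scheme; only the extra-bit counts (1..5, 16, 18) appear in the tables
 * above. */
enum {
  CAT1_MIN_VAL = 5, CAT2_MIN_VAL = 7, CAT3_MIN_VAL = 11,
  CAT4_MIN_VAL = 19, CAT5_MIN_VAL = 35, CAT6_MIN_VAL = 67
};

/* Reconstruct a coefficient magnitude from a category minimum and the raw
 * extra bits read from the bitstream. */
static int cat_value(int min_val, unsigned extra_bits) {
  return min_val + (int)extra_bits;
}

int main(void) {
  /* CAT2 spans [7, 10]: two extra bits select one of four magnitudes. */
  for (unsigned e = 0; e < 4; ++e)
    printf("CAT2 extra=%u -> magnitude %d\n", e, cat_value(CAT2_MIN_VAL, e));
  return 0;
}
```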
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
index 7ae8676..3f43405 100644
--- a/av1/encoder/tokenize.h
+++ b/av1/encoder/tokenize.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_TOKENIZE_H_
-#define VP10_ENCODER_TOKENIZE_H_
+#ifndef AV1_ENCODER_TOKENIZE_H_
+#define AV1_ENCODER_TOKENIZE_H_
#include "av1/common/entropy.h"
@@ -22,7 +22,7 @@
#define EOSB_TOKEN 127 // Not signalled, encoder only
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef int32_t EXTRABIT;
#else
typedef int16_t EXTRABIT;
@@ -34,7 +34,7 @@
} TOKENVALUE;
typedef struct {
- const vpx_prob *context_tree;
+ const aom_prob *context_tree;
#if CONFIG_ANS
const rans_dec_lut *token_cdf;
#endif // CONFIG_ANS
@@ -43,68 +43,67 @@
uint8_t skip_eob_node;
} TOKENEXTRA;
-extern const vpx_tree_index vp10_coef_tree[];
-extern const vpx_tree_index vp10_coef_con_tree[];
+extern const aom_tree_index av1_coef_tree[];
+extern const aom_tree_index av1_coef_con_tree[];
#if !CONFIG_ANS
-extern const struct vp10_token vp10_coef_encodings[];
+extern const struct av1_token av1_coef_encodings[];
#endif // !CONFIG_ANS
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-struct VP10_COMP;
+struct AV1_COMP;
struct ThreadData;
#if CONFIG_VAR_TX
-void vp10_tokenize_sb_inter(struct VP10_COMP *cpi, struct ThreadData *td,
- TOKENEXTRA **t, int dry_run, int mi_row, int mi_col,
- BLOCK_SIZE bsize);
+void av1_tokenize_sb_inter(struct AV1_COMP *cpi, struct ThreadData *td,
+ TOKENEXTRA **t, int dry_run, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
#endif
-void vp10_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
- int plane, TOKENEXTRA **t);
-void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
- TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+void av1_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
+ int plane, TOKENEXTRA **t);
+void av1_tokenize_sb(struct AV1_COMP *cpi, struct ThreadData *td,
+ TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
#if CONFIG_SUPERTX
-void vp10_tokenize_sb_supertx(struct VP10_COMP *cpi, struct ThreadData *td,
- TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+void av1_tokenize_sb_supertx(struct AV1_COMP *cpi, struct ThreadData *td,
+ TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
#endif
-extern const int16_t *vp10_dct_value_cost_ptr;
+extern const int16_t *av1_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the
* fields are not.
*/
-extern const TOKENVALUE *vp10_dct_value_tokens_ptr;
-extern const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens;
-extern const int *vp10_dct_cat_lt_10_value_cost;
-extern const int16_t vp10_cat6_low_cost[256];
-extern const int vp10_cat6_high_cost[64];
-extern const int vp10_cat6_high10_high_cost[256];
-extern const int vp10_cat6_high12_high_cost[1024];
-static INLINE int vp10_get_cost(int16_t token, EXTRABIT extrabits,
- const int *cat6_high_table) {
+extern const TOKENVALUE *av1_dct_value_tokens_ptr;
+extern const TOKENVALUE *av1_dct_cat_lt_10_value_tokens;
+extern const int *av1_dct_cat_lt_10_value_cost;
+extern const int16_t av1_cat6_low_cost[256];
+extern const int av1_cat6_high_cost[64];
+extern const int av1_cat6_high10_high_cost[256];
+extern const int av1_cat6_high12_high_cost[1024];
+static INLINE int av1_get_cost(int16_t token, EXTRABIT extrabits,
+ const int *cat6_high_table) {
if (token != CATEGORY6_TOKEN)
- return vp10_extra_bits[token].cost[extrabits >> 1];
- return vp10_cat6_low_cost[(extrabits >> 1) & 0xff] +
+ return av1_extra_bits[token].cost[extrabits >> 1];
+ return av1_cat6_low_cost[(extrabits >> 1) & 0xff] +
cat6_high_table[extrabits >> 9];
}
-#if CONFIG_VP9_HIGHBITDEPTH
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
- return bit_depth == 8 ? vp10_cat6_high_cost
- : (bit_depth == 10 ? vp10_cat6_high10_high_cost
- : vp10_cat6_high12_high_cost);
+#if CONFIG_AOM_HIGHBITDEPTH
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
+ return bit_depth == 8 ? av1_cat6_high_cost
+ : (bit_depth == 10 ? av1_cat6_high10_high_cost
+ : av1_cat6_high12_high_cost);
}
#else
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
(void)bit_depth;
- return vp10_cat6_high_cost;
+ return av1_cat6_high_cost;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static INLINE void vp10_get_token_extra(int v, int16_t *token,
- EXTRABIT *extra) {
+static INLINE void av1_get_token_extra(int v, int16_t *token, EXTRABIT *extra) {
if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
*token = CATEGORY6_TOKEN;
if (v >= CAT6_MIN_VAL)
@@ -113,29 +112,29 @@
*extra = -2 * v - 2 * CAT6_MIN_VAL + 1;
return;
}
- *token = vp10_dct_cat_lt_10_value_tokens[v].token;
- *extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
+ *token = av1_dct_cat_lt_10_value_tokens[v].token;
+ *extra = av1_dct_cat_lt_10_value_tokens[v].extra;
}
-static INLINE int16_t vp10_get_token(int v) {
+static INLINE int16_t av1_get_token(int v) {
if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
- return vp10_dct_cat_lt_10_value_tokens[v].token;
+ return av1_dct_cat_lt_10_value_tokens[v].token;
}
-static INLINE int vp10_get_token_cost(int v, int16_t *token,
- const int *cat6_high_table) {
+static INLINE int av1_get_token_cost(int v, int16_t *token,
+ const int *cat6_high_table) {
if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
EXTRABIT extrabits;
*token = CATEGORY6_TOKEN;
extrabits = abs(v) - CAT6_MIN_VAL;
- return vp10_cat6_low_cost[extrabits & 0xff] +
+ return av1_cat6_low_cost[extrabits & 0xff] +
cat6_high_table[extrabits >> 8];
}
- *token = vp10_dct_cat_lt_10_value_tokens[v].token;
- return vp10_dct_cat_lt_10_value_cost[v];
+ *token = av1_dct_cat_lt_10_value_tokens[v].token;
+ return av1_dct_cat_lt_10_value_cost[v];
}
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_TOKENIZE_H_
+#endif // AV1_ENCODER_TOKENIZE_H_
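
Editor's note: the av1_get_token_extra() body above shows the negative CAT6 branch (*extra = -2 * v - 2 * CAT6_MIN_VAL + 1), and the cost helpers index their tables with extrabits >> 1: the sign is packed into bit 0 of the extra value and the magnitude offset into the remaining bits. A round-trip sketch of that packing, assuming CAT6_MIN_VAL = 67 and a symmetric positive branch, neither of which is visible in this hunk:

```c
#include <assert.h>

/* Hedged sketch: CAT6 extra-bit packing implied by av1_get_token_extra()
 * and the ">> 1" indexing in av1_get_cost(). CAT6_MIN_VAL and the positive
 * branch are assumptions. */
enum { CAT6_MIN_VAL = 67 };

static int cat6_pack(int v) {
  assert(v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL);
  return v >= CAT6_MIN_VAL ? 2 * (v - CAT6_MIN_VAL)       /* sign bit = 0 */
                           : 2 * (-v - CAT6_MIN_VAL) + 1; /* sign bit = 1 */
}

static int cat6_unpack(int extra) {
  const int mag = CAT6_MIN_VAL + (extra >> 1);
  return (extra & 1) ? -mag : mag;
}

int main(void) {
  assert(cat6_unpack(cat6_pack(70)) == 70);
  assert(cat6_unpack(cat6_pack(-70)) == -70);
  return 0;
}
```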
diff --git a/av1/encoder/treewriter.c b/av1/encoder/treewriter.c
index d3fcd45..5fbc857 100644
--- a/av1/encoder/treewriter.c
+++ b/av1/encoder/treewriter.c
@@ -10,13 +10,13 @@
#include "av1/encoder/treewriter.h"
-static void tree2tok(struct vp10_token *tokens, const vpx_tree_index *tree,
+static void tree2tok(struct av1_token *tokens, const aom_tree_index *tree,
int i, int v, int l) {
v += v;
++l;
do {
- const vpx_tree_index j = tree[i++];
+ const aom_tree_index j = tree[i++];
if (j <= 0) {
tokens[-j].value = v;
tokens[-j].len = l;
@@ -26,12 +26,12 @@
} while (++v & 1);
}
-void vp10_tokens_from_tree(struct vp10_token *tokens,
- const vpx_tree_index *tree) {
+void av1_tokens_from_tree(struct av1_token *tokens,
+ const aom_tree_index *tree) {
tree2tok(tokens, tree, 0, 0, 0);
}
-static unsigned int convert_distribution(unsigned int i, vpx_tree tree,
+static unsigned int convert_distribution(unsigned int i, aom_tree tree,
unsigned int branch_ct[][2],
const unsigned int num_events[]) {
unsigned int left, right;
@@ -51,8 +51,8 @@
return left + right;
}
-void vp10_tree_probs_from_distribution(vpx_tree tree,
- unsigned int branch_ct[/* n-1 */][2],
- const unsigned int num_events[/* n */]) {
+void av1_tree_probs_from_distribution(aom_tree tree,
+ unsigned int branch_ct[/* n-1 */][2],
+ const unsigned int num_events[/* n */]) {
convert_distribution(0, tree, branch_ct, num_events);
}
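
Editor's note: tree2tok() above is the whole code-table builder. It walks an aom_tree_index array depth-first; every non-positive entry is a leaf holding the negated token id, which receives the bit pattern and length of the path that reached it. A freestanding copy over a hypothetical three-symbol tree, assuming the usual convention that positive entries are array indices:

```c
#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: the tree2tok() recursion above, with int8_t standing in
 * for aom_tree_index and a made-up three-symbol tree. */
struct token { int value; int len; };

static void tree2tok(struct token *tokens, const int8_t *tree, int i, int v,
                     int l) {
  v += v;
  ++l;
  do {
    const int8_t j = tree[i++];
    if (j <= 0) {           /* leaf: -j is the token id */
      tokens[-j].value = v;
      tokens[-j].len = l;
    } else {                /* interior node: recurse */
      tree2tok(tokens, tree, j, v, l);
    }
  } while (++v & 1);        /* visit the 1-branch sibling, then stop */
}

int main(void) {
  /* bit 0 -> token 0; bit 1 -> descend; then bit 0 -> token 1, bit 1 -> 2 */
  static const int8_t tree[4] = { 0, 2, -1, -2 };
  struct token tok[3];
  tree2tok(tok, tree, 0, 0, 0);
  for (int t = 0; t < 3; ++t)   /* expect (0,1), (2,2), (3,2) */
    printf("token %d: value=%d len=%d\n", t, tok[t].value, tok[t].len);
  return 0;
}
```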
diff --git a/av1/encoder/treewriter.h b/av1/encoder/treewriter.h
index 43c615f..9a66115 100644
--- a/av1/encoder/treewriter.h
+++ b/av1/encoder/treewriter.h
@@ -8,37 +8,37 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_TREEWRITER_H_
-#define VP10_ENCODER_TREEWRITER_H_
+#ifndef AV1_ENCODER_TREEWRITER_H_
+#define AV1_ENCODER_TREEWRITER_H_
-#ifdef VP10_FORCE_VPXBOOL_TREEWRITER
+#ifdef AV1_FORCE_AOMBOOL_TREEWRITER
#include "aom_dsp/bitwriter.h"
-#define tree_writer vpx_writer
-#define tree_bit_write vpx_write
+#define tree_writer aom_writer
+#define tree_bit_write aom_write
#else
#include "av1/encoder/bitwriter.h"
-#define tree_writer vp10_writer
-#define tree_bit_write vp10_write
+#define tree_writer aom_writer
+#define tree_bit_write aom_write
#endif
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_tree_probs_from_distribution(vpx_tree tree,
- unsigned int branch_ct[/* n - 1 */][2],
- const unsigned int num_events[/* n */]);
+void av1_tree_probs_from_distribution(aom_tree tree,
+ unsigned int branch_ct[/* n - 1 */][2],
+ const unsigned int num_events[/* n */]);
-struct vp10_token {
+struct av1_token {
int value;
int len;
};
-void vp10_tokens_from_tree(struct vp10_token *, const vpx_tree_index *);
+void av1_tokens_from_tree(struct av1_token *, const aom_tree_index *);
-static INLINE void vp10_write_tree(tree_writer *w, const vpx_tree_index *tree,
- const vpx_prob *probs, int bits, int len,
- vpx_tree_index i) {
+static INLINE void av1_write_tree(tree_writer *w, const aom_tree_index *tree,
+ const aom_prob *probs, int bits, int len,
+ aom_tree_index i) {
do {
const int bit = (bits >> --len) & 1;
tree_bit_write(w, bit, probs[i >> 1]);
@@ -46,10 +46,10 @@
} while (len);
}
-static INLINE void vp10_write_token(tree_writer *w, const vpx_tree_index *tree,
- const vpx_prob *probs,
- const struct vp10_token *token) {
- vp10_write_tree(w, tree, probs, token->value, token->len, 0);
+static INLINE void av1_write_token(tree_writer *w, const aom_tree_index *tree,
+ const aom_prob *probs,
+ const struct av1_token *token) {
+ av1_write_tree(w, tree, probs, token->value, token->len, 0);
}
#undef tree_writer
@@ -58,4 +58,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_TREEWRITER_H_
+#endif // AV1_ENCODER_TREEWRITER_H_
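
Editor's note: av1_write_token() above emits a code most-significant bit first, stepping through the tree so each bit is coded with the probability of the node it leaves. A sketch of just the bit walk, with the arithmetic-coding write replaced by a printf stub since aom_writer state is out of scope here; the tree and probabilities are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: the MSB-first walk of av1_write_tree() with the
 * probability-weighted bit write stubbed out. */
static void write_tree_stub(const int8_t *tree, const uint8_t *probs,
                            int bits, int len, int8_t i) {
  do {
    const int bit = (bits >> --len) & 1;
    printf("write bit %d with prob %u\n", bit, probs[i >> 1]);
    i = tree[i + bit];
  } while (len);
}

int main(void) {
  static const int8_t tree[4] = { 0, 2, -1, -2 };
  static const uint8_t probs[2] = { 128, 170 }; /* hypothetical node probs */
  write_tree_stub(tree, probs, /*value=*/3, /*len=*/2, 0); /* token 2 */
  return 0;
}
```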
diff --git a/av1/encoder/variance_tree.c b/av1/encoder/variance_tree.c
index 219d39a..3a23027 100644
--- a/av1/encoder/variance_tree.c
+++ b/av1/encoder/variance_tree.c
@@ -11,7 +11,7 @@
#include "av1/encoder/variance_tree.h"
#include "av1/encoder/encoder.h"
-void vp10_setup_var_tree(struct VP10Common *cm, ThreadData *td) {
+void av1_setup_var_tree(struct AV1Common *cm, ThreadData *td) {
int i, j;
#if CONFIG_EXT_PARTITION
const int leaf_nodes = 1024;
@@ -24,9 +24,9 @@
VAR_TREE *this_var;
int nodes;
- vpx_free(td->var_tree);
+ aom_free(td->var_tree);
CHECK_MEM_ERROR(cm, td->var_tree,
- vpx_calloc(tree_nodes, sizeof(*td->var_tree)));
+ aom_calloc(tree_nodes, sizeof(*td->var_tree)));
this_var = &td->var_tree[0];
@@ -54,7 +54,7 @@
}
}
-void vp10_free_var_tree(ThreadData *td) {
- vpx_free(td->var_tree);
+void av1_free_var_tree(ThreadData *td) {
+ aom_free(td->var_tree);
td->var_tree = NULL;
}
diff --git a/av1/encoder/variance_tree.h b/av1/encoder/variance_tree.h
index 08c40d3..728d7f4 100644
--- a/av1/encoder/variance_tree.h
+++ b/av1/encoder/variance_tree.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_ENCODER_VARIANCE_TREE_H_
-#define VP10_ENCODER_VARIANCE_TREE_H_
+#ifndef AV1_ENCODER_VARIANCE_TREE_H_
+#define AV1_ENCODER_VARIANCE_TREE_H_
#include <assert.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "av1/common/enums.h"
@@ -23,7 +23,7 @@
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
struct ThreadData;
typedef struct {
@@ -50,13 +50,13 @@
int ref_stride;
int width;
int height;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int highbd;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} VAR_TREE;
-void vp10_setup_var_tree(struct VP10Common *cm, struct ThreadData *td);
-void vp10_free_var_tree(struct ThreadData *td);
+void av1_setup_var_tree(struct AV1Common *cm, struct ThreadData *td);
+void av1_free_var_tree(struct ThreadData *td);
// Set variance values given sum square error, sum error, count.
static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
@@ -92,4 +92,4 @@
} // extern "C"
#endif
-#endif /* VP10_ENCODER_VARIANCE_TREE_H_ */
+#endif /* AV1_ENCODER_VARIANCE_TREE_H_ */
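
Editor's note: only the comment line of fill_variance() survives in this hunk, but the quantity it computes is the classic running-sums variance: for a sum of squares s2, a sum s, and a count c, var = (s2 - s*s/c)/c. A scalar sketch of that identity; the real helper also stores the raw sums in the var struct and applies fixed-point scaling not shown here:

```c
#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: integer population variance from running sums, the
 * identity behind fill_variance(); the encoder's fixed-point scaling is
 * omitted. */
static int64_t variance_from_sums(int64_t s2, int64_t s, int c) {
  return (s2 - (s * s) / c) / c;
}

int main(void) {
  /* Pixels {1, 3, 5}: s = 9, s2 = 35 -> (35 - 27) / 3 = 2 (truncated). */
  printf("var = %lld\n", (long long)variance_from_sums(35, 9, 3));
  return 0;
}
```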
diff --git a/av1/encoder/wedge_utils.c b/av1/encoder/wedge_utils.c
index 548bc48..596c5df 100644
--- a/av1/encoder/wedge_utils.c
+++ b/av1/encoder/wedge_utils.c
@@ -10,11 +10,11 @@
#include <assert.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "av1/common/reconinter.h"
@@ -48,8 +48,8 @@
* holds for 8 bit input, and on real input, it should hold practically always,
* as residuals are expected to be small.
*/
-uint64_t vp10_wedge_sse_from_residuals_c(const int16_t *r1, const int16_t *d,
- const uint8_t *m, int N) {
+uint64_t av1_wedge_sse_from_residuals_c(const int16_t *r1, const int16_t *d,
+ const uint8_t *m, int N) {
uint64_t csse = 0;
int i;
assert(N % 64 == 0);
@@ -92,8 +92,8 @@
* Note that for efficiency, ds is stored on 16 bits. Real input residuals
* being small, this should not cause a noticeable issue.
*/
-int vp10_wedge_sign_from_residuals_c(const int16_t *ds, const uint8_t *m, int N,
- int64_t limit) {
+int av1_wedge_sign_from_residuals_c(const int16_t *ds, const uint8_t *m, int N,
+ int64_t limit) {
int64_t acc = 0;
assert(N % 64 == 0);
@@ -117,8 +117,8 @@
*
* The result is saturated to signed 16 bits.
*/
-void vp10_wedge_compute_delta_squares_c(int16_t *d, const int16_t *a,
- const int16_t *b, int N) {
+void av1_wedge_compute_delta_squares_c(int16_t *d, const int16_t *a,
+ const int16_t *b, int N) {
int i;
assert(N % 64 == 0);
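
Editor's note: the comment above av1_wedge_sse_from_residuals_c() argues the 16-bit intermediate "holds for 8 bit input". The summand is the residual of a mask-blended compound predictor scaled by the maximum mask weight, so the SSE can be accumulated in integers and rescaled once at the end. A scalar model of that computation; the blend direction (whether m weighs p0 or p1), the saturation of the intermediate, and the rounding of the final shift are assumptions, not read from this hunk:

```c
#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: scalar masked-SSE model. WEDGE_WEIGHT_BITS = 6 matches
 * MAX_MASK_VALUE in wedge_utils_sse2.c later in this patch; the sign of the
 * m[i]*d[i] term and the final shift's rounding are assumptions. */
#define WEDGE_WEIGHT_BITS 6
#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)

static uint64_t wedge_sse_model(const int16_t *r1, const int16_t *d,
                                const uint8_t *m, int n) {
  uint64_t csse = 0;
  for (int i = 0; i < n; ++i) {
    /* MAX_MASK_VALUE times the blended residual, kept in 32 bits. */
    const int32_t t = MAX_MASK_VALUE * r1[i] + m[i] * d[i];
    csse += (uint64_t)((int64_t)t * t);
  }
  return csse >> (2 * WEDGE_WEIGHT_BITS); /* undo the mask scaling */
}

int main(void) {
  const int16_t r1[4] = { 2, -1, 0, 3 }, d[4] = { 4, 4, -4, 0 };
  const uint8_t m[4] = { 0, 32, 64, 16 };
  printf("masked sse = %llu\n",
         (unsigned long long)wedge_sse_model(r1, d, m, 4));
  return 0;
}
```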
diff --git a/av1/encoder/x86/vp10_highbd_quantize_sse4.c b/av1/encoder/x86/av1_highbd_quantize_sse4.c
similarity index 98%
rename from av1/encoder/x86/vp10_highbd_quantize_sse4.c
rename to av1/encoder/x86/av1_highbd_quantize_sse4.c
index 8b05c6a..dd3405f 100644
--- a/av1/encoder/x86/vp10_highbd_quantize_sse4.c
+++ b/av1/encoder/x86/av1_highbd_quantize_sse4.c
@@ -11,8 +11,8 @@
#include <smmintrin.h>
#include <stdint.h>
-#include "./vp10_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./av1_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
// Coefficient quantization phase 1
// param[0-2] : rounding/quan/dequan constants
@@ -106,7 +106,7 @@
return eobValue;
}
-void vp10_highbd_quantize_fp_sse4_1(
+void av1_highbd_quantize_fp_sse4_1(
const tran_low_t *coeff_ptr, intptr_t count, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index 6fe3ada..6b88879 100644
--- a/av1/encoder/x86/dct_intrin_sse2.c
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -11,8 +11,8 @@
#include <assert.h>
#include <emmintrin.h> // SSE2
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -200,12 +200,12 @@
}
#endif // CONFIG_EXT_TX
-void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in[4];
switch (tx_type) {
- case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break;
+ case DCT_DCT: aom_fdct4x4_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_4x4(input, in, stride, 0, 0);
fadst4_sse2(in);
@@ -296,12 +296,14 @@
}
}
-void vp10_fdct8x8_quant_sse2(
- const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
- int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void av1_fdct8x8_quant_sse2(const int16_t *input, int stride,
+ int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr,
+ const int16_t *iscan_ptr) {
__m128i zero;
int pass;
// Constants
@@ -1282,12 +1284,12 @@
}
#endif // CONFIG_EXT_TX
-void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in[8];
switch (tx_type) {
- case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break;
+ case DCT_DCT: aom_fdct8x8_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_8x8(input, in, stride, 0, 0);
fadst8_sse2(in);
@@ -2472,12 +2474,12 @@
}
#endif // CONFIG_EXT_TX
-void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
- int tx_type) {
+void av1_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in0[16], in1[16];
switch (tx_type) {
- case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break;
+ case DCT_DCT: aom_fdct16x16_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_16x16(input, in0, in1, stride, 0, 0);
fadst16_sse2(in0, in1);
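
Editor's note: all three av1_fht*_sse2() functions above share one dispatch shape: DCT_DCT short-circuits to the plain aom_fdct*_sse2() path, while the hybrid types load the block and run a DCT or ADST per direction. A sketch of the base tx_type-to-kernel pairing; the enum values are illustrative and the CONFIG_EXT_TX variants are omitted:

```c
#include <stdio.h>

/* Hedged sketch: per-direction kernel choice for the four base hybrid
 * transform types; the real enum lives in common/enums.h. */
typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE_SKETCH;

static const char *col_kernel(TX_TYPE_SKETCH t) {
  return (t == ADST_DCT || t == ADST_ADST) ? "ADST" : "DCT";
}
static const char *row_kernel(TX_TYPE_SKETCH t) {
  return (t == DCT_ADST || t == ADST_ADST) ? "ADST" : "DCT";
}

int main(void) {
  for (int t = DCT_DCT; t <= ADST_ADST; ++t)
    printf("tx_type %d: columns=%s rows=%s\n", t,
           col_kernel((TX_TYPE_SKETCH)t), row_kernel((TX_TYPE_SKETCH)t));
  return 0;
}
```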
diff --git a/av1/encoder/x86/dct_sse2.asm b/av1/encoder/x86/dct_sse2.asm
index c3a5fb5..7d8eb61 100644
--- a/av1/encoder/x86/dct_sse2.asm
+++ b/av1/encoder/x86/dct_sse2.asm
@@ -8,7 +8,7 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vp10
+%define private_prefix av1
%include "third_party/x86inc/x86inc.asm"
@@ -62,7 +62,7 @@
psllw m0, 2
psllw m1, 2
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; sign extension
mova m2, m0
mova m3, m1
diff --git a/av1/encoder/x86/dct_ssse3.c b/av1/encoder/x86/dct_ssse3.c
index aa018a1..b589914 100644
--- a/av1/encoder/x86/dct_ssse3.c
+++ b/av1/encoder/x86/dct_ssse3.c
@@ -16,11 +16,11 @@
#endif
#include <tmmintrin.h> // SSSE3
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
#include "aom_dsp/x86/inv_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
-void vp10_fdct8x8_quant_ssse3(
+void av1_fdct8x8_quant_ssse3(
const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
diff --git a/av1/encoder/x86/error_intrin_avx2.c b/av1/encoder/x86/error_intrin_avx2.c
index 6e7c093..8ab1ea0 100644
--- a/av1/encoder/x86/error_intrin_avx2.c
+++ b/av1/encoder/x86/error_intrin_avx2.c
@@ -10,11 +10,11 @@
#include <immintrin.h> // AVX2
-#include "./vp10_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./av1_rtcd.h"
+#include "aom/aom_integer.h"
-int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
- intptr_t block_size, int64_t *ssz) {
+int64_t av1_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz) {
__m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
__m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
__m256i sse_reg_64hi, ssz_reg_64hi;
diff --git a/av1/encoder/x86/error_sse2.asm b/av1/encoder/x86/error_sse2.asm
index 0772da4..44a52d7 100644
--- a/av1/encoder/x86/error_sse2.asm
+++ b/av1/encoder/x86/error_sse2.asm
@@ -8,13 +8,13 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vp10
+%define private_prefix av1
%include "third_party/x86inc/x86inc.asm"
SECTION .text
-; int64_t vp10_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+; int64_t av1_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
; int64_t *ssz)
INIT_XMM sse2
@@ -76,7 +76,7 @@
RET
; Compute the sum of squared difference between two int16_t vectors.
-; int64_t vp10_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
+; int64_t av1_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
; intptr_t block_size)
INIT_XMM sse2
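
Editor's note: the two routines above are documented only by their signature comments: av1_block_error() returns the sum of squared differences between the original and dequantized coefficients and writes the sum of squares of the originals through ssz, while av1_block_error_fp() drops the ssz output. A scalar reference derived from those comments:

```c
#include <stdint.h>

/* Hedged sketch: scalar equivalent of the SSE2 block-error routines,
 * reconstructed from the signature comments above. */
static int64_t block_error(const int16_t *coeff, const int16_t *dqcoeff,
                           intptr_t n, int64_t *ssz) {
  int64_t err = 0, sq = 0;
  for (intptr_t i = 0; i < n; ++i) {
    const int d = coeff[i] - dqcoeff[i];
    err += (int64_t)d * d;              /* sum of squared differences */
    sq += (int64_t)coeff[i] * coeff[i]; /* sum of squares of coeff */
  }
  *ssz = sq;
  return err;
}

int main(void) {
  const int16_t c[4] = { 10, -3, 0, 7 }, dq[4] = { 9, -3, 1, 7 };
  int64_t ssz;
  return !(block_error(c, dq, 4, &ssz) == 2 && ssz == 158);
}
```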
diff --git a/av1/encoder/x86/highbd_block_error_intrin_sse2.c b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
index 2728880..bae0a81 100644
--- a/av1/encoder/x86/highbd_block_error_intrin_sse2.c
+++ b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -13,9 +13,9 @@
#include "av1/common/common.h"
-int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
- intptr_t block_size, int64_t *ssz,
- int bps) {
+int64_t av1_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz,
+ int bps) {
int i, j, test;
uint32_t temp[4];
__m128i max, min, cmp0, cmp1, cmp2, cmp3;
diff --git a/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
index a6cb454..d601208 100644
--- a/av1/encoder/x86/highbd_fwd_txfm_sse4.c
+++ b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <smmintrin.h> /* SSE4.1 */
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "av1/common/vp10_fwd_txfm2d_cfg.h"
-#include "av1/common/vp10_txfm.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "av1/common/av1_fwd_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
#include "av1/common/x86/highbd_txfm_utility_sse4.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -121,10 +121,10 @@
}
// Note:
-// We implement vp10_fwd_txfm2d_4x4(). This function is kept here since
-// vp10_highbd_fht4x4_c() is not removed yet
-void vp10_highbd_fht4x4_sse4_1(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+// We implement av1_fwd_txfm2d_4x4(). This function is kept here since
+// av1_highbd_fht4x4_c() is not removed yet
+void av1_highbd_fht4x4_sse4_1(const int16_t *input, tran_low_t *output,
+ int stride, int tx_type) {
(void)input;
(void)output;
(void)stride;
@@ -206,8 +206,8 @@
in[3] = _mm_unpackhi_epi64(v1, v3);
}
-void vp10_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
- int input_stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
+ int input_stride, int tx_type, int bd) {
__m128i in[4];
const TXFM_2D_CFG *cfg = NULL;
@@ -927,8 +927,8 @@
out[15] = _mm_sub_epi32(kZero, u[1]);
}
-void vp10_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff,
- int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
+ int tx_type, int bd) {
__m128i in[16], out[16];
const TXFM_2D_CFG *cfg = NULL;
@@ -1791,8 +1791,8 @@
write_buffer_8x8(&in[48], output);
}
-void vp10_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
- int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, int tx_type, int bd) {
__m128i in[64], out[64];
const TXFM_2D_CFG *cfg = NULL;
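
Editor's note: the av1_fwd_txfm2d_* entry points above all follow the separable pattern: a 1-D pass down the columns, a transpose, then a 1-D pass along the rows, with per-size parameters pulled from the TXFM_2D_CFG table. A floating-point model of that structure for a 4x4 block, assuming plain matrix kernels rather than the fixed-point butterfly stages the SSE4.1 code uses:

```c
#include <stdio.h>

/* Hedged sketch: Y = C * X * R^T, i.e. column transform then row transform.
 * Identity kernels stand in for the real fixed-point DCT/ADST stages. */
static void mat4_mul(const double a[4][4], const double b[4][4],
                     double out[4][4]) {
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j) {
      out[i][j] = 0;
      for (int k = 0; k < 4; ++k) out[i][j] += a[i][k] * b[k][j];
    }
}

static void fwd_txfm2d_model(const double col_tx[4][4],
                             const double row_tx[4][4], const double x[4][4],
                             double y[4][4]) {
  double tmp[4][4], row_t[4][4];
  mat4_mul(col_tx, x, tmp); /* transform each column */
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j) row_t[i][j] = row_tx[j][i]; /* R^T */
  mat4_mul(tmp, row_t, y); /* transform each row */
}

int main(void) {
  const double id[4][4] = { { 1, 0, 0, 0 }, { 0, 1, 0, 0 },
                            { 0, 0, 1, 0 }, { 0, 0, 0, 1 } };
  const double x[4][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 },
                           { 9, 10, 11, 12 }, { 13, 14, 15, 16 } };
  double y[4][4];
  fwd_txfm2d_model(id, id, x, y); /* identity kernels: y == x */
  printf("y[2][3] = %g\n", y[2][3]);
  return 0;
}
```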
diff --git a/av1/encoder/x86/quantize_sse2.c b/av1/encoder/x86/quantize_sse2.c
index b8cd0c7..2f0051b 100644
--- a/av1/encoder/x86/quantize_sse2.c
+++ b/av1/encoder/x86/quantize_sse2.c
@@ -11,16 +11,16 @@
#include <emmintrin.h>
#include <xmmintrin.h>
-#include "./vp10_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./av1_rtcd.h"
+#include "aom/aom_integer.h"
-void vp10_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
- int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+void av1_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr,
+ const int16_t *iscan_ptr) {
__m128i zero;
__m128i thr;
int16_t nzflag;
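
Editor's note: av1_quantize_fp_sse2() above vectorizes the "fast path" quantizer: add the rounding term, scale by the quant factor, restore the sign, dequantize, and track the last nonzero position for the eob. A scalar model, hedged: the two-entry DC/AC table layout, the >> 16 scaling, and natural-order eob tracking follow the usual libvpx convention rather than anything visible in this hunk, and skip_block plus the zero-bin are omitted:

```c
#include <stdint.h>
#include <stdlib.h>

/* Hedged sketch: scalar "fp" quantizer model. Table layout (index 0 = DC,
 * index 1 = AC), the >> 16 scale, and scan order are assumptions. */
static uint16_t quantize_fp_model(const int16_t *coeff, intptr_t n,
                                  const int16_t *round, const int16_t *quant,
                                  const int16_t *dequant, int16_t *qcoeff,
                                  int16_t *dqcoeff) {
  uint16_t eob = 0;
  for (intptr_t i = 0; i < n; ++i) {
    const int ac = i != 0;
    const int sign = coeff[i] < 0 ? -1 : 1;
    const int32_t tmp = ((abs(coeff[i]) + round[ac]) * quant[ac]) >> 16;
    qcoeff[i] = (int16_t)(sign * tmp);
    dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[ac]);
    if (tmp) eob = (uint16_t)(i + 1); /* last nonzero + 1, natural order */
  }
  return eob;
}

int main(void) {
  const int16_t coeff[4] = { 100, -50, 3, 0 };
  const int16_t round[2] = { 2, 2 }, quant[2] = { 16384, 16384 },
                dequant[2] = { 4, 4 };
  int16_t q[4], dq[4];
  return quantize_fp_model(coeff, 4, round, quant, dequant, q, dq) != 3;
}
```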
diff --git a/av1/encoder/x86/quantize_ssse3_x86_64.asm b/av1/encoder/x86/quantize_ssse3_x86_64.asm
index b8fefa2..05e0be6 100644
--- a/av1/encoder/x86/quantize_ssse3_x86_64.asm
+++ b/av1/encoder/x86/quantize_ssse3_x86_64.asm
@@ -8,7 +8,7 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vp10
+%define private_prefix av1
%include "third_party/x86inc/x86inc.asm"
diff --git a/av1/encoder/x86/ssim_opt_x86_64.asm b/av1/encoder/x86/ssim_opt_x86_64.asm
index 29659ee..4b5c450 100644
--- a/av1/encoder/x86/ssim_opt_x86_64.asm
+++ b/av1/encoder/x86/ssim_opt_x86_64.asm
@@ -61,8 +61,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
-global sym(vp10_ssim_parms_16x16_sse2) PRIVATE
-sym(vp10_ssim_parms_16x16_sse2):
+global sym(av1_ssim_parms_16x16_sse2) PRIVATE
+sym(av1_ssim_parms_16x16_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
@@ -151,8 +151,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
-global sym(vp10_ssim_parms_8x8_sse2) PRIVATE
-sym(vp10_ssim_parms_8x8_sse2):
+global sym(av1_ssim_parms_8x8_sse2) PRIVATE
+sym(av1_ssim_parms_8x8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
diff --git a/av1/encoder/x86/temporal_filter_apply_sse2.asm b/av1/encoder/x86/temporal_filter_apply_sse2.asm
index eabe575..15de6e8 100644
--- a/av1/encoder/x86/temporal_filter_apply_sse2.asm
+++ b/av1/encoder/x86/temporal_filter_apply_sse2.asm
@@ -11,7 +11,7 @@
%include "aom_ports/x86_abi_support.asm"
-; void vp10_temporal_filter_apply_sse2 | arg
+; void av1_temporal_filter_apply_sse2 | arg
; (unsigned char *frame1, | 0
; unsigned int stride, | 1
; unsigned char *frame2, | 2
@@ -21,8 +21,8 @@
; int filter_weight, | 6
; unsigned int *accumulator, | 7
; unsigned short *count) | 8
-global sym(vp10_temporal_filter_apply_sse2) PRIVATE
-sym(vp10_temporal_filter_apply_sse2):
+global sym(av1_temporal_filter_apply_sse2) PRIVATE
+sym(av1_temporal_filter_apply_sse2):
push rbp
mov rbp, rsp
diff --git a/av1/encoder/x86/wedge_utils_sse2.c b/av1/encoder/x86/wedge_utils_sse2.c
index a6be947..35e8493 100644
--- a/av1/encoder/x86/wedge_utils_sse2.c
+++ b/av1/encoder/x86/wedge_utils_sse2.c
@@ -13,17 +13,17 @@
#include "aom_dsp/x86/synonyms.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "av1/common/reconinter.h"
#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
/**
- * See vp10_wedge_sse_from_residuals_c
+ * See av1_wedge_sse_from_residuals_c
*/
-uint64_t vp10_wedge_sse_from_residuals_sse2(const int16_t *r1, const int16_t *d,
- const uint8_t *m, int N) {
+uint64_t av1_wedge_sse_from_residuals_sse2(const int16_t *r1, const int16_t *d,
+ const uint8_t *m, int N) {
int n = -N;
int n8 = n + 8;
@@ -94,10 +94,10 @@
}
/**
- * See vp10_wedge_sign_from_residuals_c
+ * See av1_wedge_sign_from_residuals_c
*/
-int vp10_wedge_sign_from_residuals_sse2(const int16_t *ds, const uint8_t *m,
- int N, int64_t limit) {
+int av1_wedge_sign_from_residuals_sse2(const int16_t *ds, const uint8_t *m,
+ int N, int64_t limit) {
int64_t acc;
__m128i v_sign_d;
@@ -188,10 +188,10 @@
}
/**
- * vp10_wedge_compute_delta_squares_c
+ * av1_wedge_compute_delta_squares_c
*/
-void vp10_wedge_compute_delta_squares_sse2(int16_t *d, const int16_t *a,
- const int16_t *b, int N) {
+void av1_wedge_compute_delta_squares_sse2(int16_t *d, const int16_t *a,
+ const int16_t *b, int N) {
const __m128i v_neg_w =
_mm_set_epi16(0xffff, 0, 0xffff, 0, 0xffff, 0, 0xffff, 0);
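
Editor's note: the v_neg_w constant above flags alternate 16-bit lanes for negation so a multiply-add can evaluate squared-difference terms pairwise; the scalar identity behind av1_wedge_compute_delta_squares is a^2 - b^2 = (a + b)(a - b), which keeps the intermediates small before the saturation to signed 16 bits that the C version's comment describes. A scalar model of that identity:

```c
#include <stdint.h>

/* Hedged sketch: a^2 - b^2 via (a + b) * (a - b), saturated to int16_t as
 * wedge_utils.c describes; the SIMD lane arrangement is not modeled. */
static int16_t sat16(int32_t v) {
  if (v > INT16_MAX) return INT16_MAX;
  if (v < INT16_MIN) return INT16_MIN;
  return (int16_t)v;
}

static void delta_squares_model(int16_t *d, const int16_t *a,
                                const int16_t *b, int n) {
  for (int i = 0; i < n; ++i)
    d[i] = sat16((a[i] + b[i]) * (a[i] - b[i]));
}

int main(void) {
  const int16_t a[2] = { 7, 300 }, b[2] = { 4, -300 };
  int16_t d[2];
  delta_squares_model(d, a, b, 2);
  return !(d[0] == 33 && d[1] == 0); /* 49 - 16 = 33; 90000 - 90000 = 0 */
}
```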
diff --git a/av1/exports_dec b/av1/exports_dec
index 71c8369..05860e8 100644
--- a/av1/exports_dec
+++ b/av1/exports_dec
@@ -1,2 +1,2 @@
-data vpx_codec_vp10_dx_algo
-text vpx_codec_vp10_dx
+data aom_codec_av1_dx_algo
+text aom_codec_av1_dx
diff --git a/av1/exports_enc b/av1/exports_enc
index d1644f2..dc4a9ea 100644
--- a/av1/exports_enc
+++ b/av1/exports_enc
@@ -1,2 +1,2 @@
-data vpx_codec_vp10_cx_algo
-text vpx_codec_vp10_cx
+data aom_codec_av1_cx_algo
+text aom_codec_av1_cx
diff --git a/av1/vp10_common.mk b/av1/vp10_common.mk
deleted file mode 100644
index e776a8a..0000000
--- a/av1/vp10_common.mk
+++ /dev/null
@@ -1,139 +0,0 @@
-##
-## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-##
-## Use of this source code is governed by a BSD-style license
-## that can be found in the LICENSE file in the root of the source
-## tree. An additional intellectual property rights grant can be found
-## in the file PATENTS. All contributing project authors may
-## be found in the AUTHORS file in the root of the source tree.
-##
-
-VP10_COMMON_SRCS-yes += vp10_common.mk
-VP10_COMMON_SRCS-yes += vp10_iface_common.h
-VP10_COMMON_SRCS-yes += common/ans.h
-VP10_COMMON_SRCS-yes += common/alloccommon.c
-VP10_COMMON_SRCS-yes += common/blockd.c
-VP10_COMMON_SRCS-yes += common/debugmodes.c
-VP10_COMMON_SRCS-yes += common/divide.h
-VP10_COMMON_SRCS-yes += common/entropy.c
-VP10_COMMON_SRCS-yes += common/entropymode.c
-VP10_COMMON_SRCS-yes += common/entropymv.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.h
-VP10_COMMON_SRCS-yes += common/alloccommon.h
-VP10_COMMON_SRCS-yes += common/blockd.h
-VP10_COMMON_SRCS-yes += common/common.h
-VP10_COMMON_SRCS-yes += common/entropy.h
-VP10_COMMON_SRCS-yes += common/entropymode.h
-VP10_COMMON_SRCS-yes += common/entropymv.h
-VP10_COMMON_SRCS-yes += common/enums.h
-VP10_COMMON_SRCS-yes += common/filter.h
-VP10_COMMON_SRCS-yes += common/filter.c
-VP10_COMMON_SRCS-yes += common/idct.h
-VP10_COMMON_SRCS-yes += common/idct.c
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm.h
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm.c
-VP10_COMMON_SRCS-yes += common/loopfilter.h
-VP10_COMMON_SRCS-yes += common/thread_common.h
-VP10_COMMON_SRCS-yes += common/mv.h
-VP10_COMMON_SRCS-yes += common/onyxc_int.h
-VP10_COMMON_SRCS-yes += common/pred_common.h
-VP10_COMMON_SRCS-yes += common/pred_common.c
-VP10_COMMON_SRCS-yes += common/quant_common.h
-VP10_COMMON_SRCS-yes += common/reconinter.h
-VP10_COMMON_SRCS-yes += common/reconintra.h
-VP10_COMMON_SRCS-yes += common/vp10_rtcd.c
-VP10_COMMON_SRCS-yes += common/vp10_rtcd_defs.pl
-VP10_COMMON_SRCS-yes += common/scale.h
-VP10_COMMON_SRCS-yes += common/scale.c
-VP10_COMMON_SRCS-yes += common/seg_common.h
-VP10_COMMON_SRCS-yes += common/seg_common.c
-VP10_COMMON_SRCS-yes += common/tile_common.h
-VP10_COMMON_SRCS-yes += common/tile_common.c
-VP10_COMMON_SRCS-yes += common/loopfilter.c
-VP10_COMMON_SRCS-yes += common/thread_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.h
-VP10_COMMON_SRCS-yes += common/quant_common.c
-VP10_COMMON_SRCS-yes += common/reconinter.c
-VP10_COMMON_SRCS-yes += common/reconintra.c
-VP10_COMMON_SRCS-yes += common/restoration.h
-VP10_COMMON_SRCS-yes += common/common_data.h
-VP10_COMMON_SRCS-yes += common/scan.c
-VP10_COMMON_SRCS-yes += common/scan.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm.c
-VP10_COMMON_SRCS-yes += common/vp10_txfm.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm1d.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm1d.c
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm1d.h
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm1d.c
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm2d.c
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm2d_cfg.h
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm2d.c
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm2d_cfg.h
-VP10_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp10_convolve_ssse3.c
-VP10_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp10_convolve_filters_ssse3.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_highbd_convolve_sse4.c
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_highbd_convolve_filters_sse4.c
-endif
-VP10_COMMON_SRCS-yes += common/vp10_convolve.c
-VP10_COMMON_SRCS-yes += common/vp10_convolve.h
-VP10_COMMON_SRCS-$(CONFIG_ANS) += common/ans.h
-VP10_COMMON_SRCS-$(CONFIG_ANS) += common/divide.h
-VP10_COMMON_SRCS-$(CONFIG_ANS) += common/divide.c
-VP10_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.h
-VP10_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.c
-ifeq (yes,$(filter yes,$(CONFIG_GLOBAL_MOTION) $(CONFIG_WARPED_MOTION)))
-VP10_COMMON_SRCS-yes += common/warped_motion.h
-VP10_COMMON_SRCS-yes += common/warped_motion.c
-endif
-VP10_COMMON_SRCS-yes += common/clpf.c
-VP10_COMMON_SRCS-yes += common/clpf.h
-ifeq ($(CONFIG_DERING),yes)
-VP10_COMMON_SRCS-yes += common/od_dering.c
-VP10_COMMON_SRCS-yes += common/od_dering.h
-VP10_COMMON_SRCS-yes += common/dering.c
-VP10_COMMON_SRCS-yes += common/dering.h
-endif
-VP10_COMMON_SRCS-yes += common/odintrin.c
-VP10_COMMON_SRCS-yes += common/odintrin.h
-
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans4_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans8_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans16_dspr2.c
-endif
-
-# common (msa)
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
-
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_dct32x32_impl_sse2.h
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_txfm_impl_sse2.h
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_txfm1d_sse4.h
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_fwd_txfm1d_sse4.c
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_fwd_txfm2d_sse4.c
-
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_txfm_utility_sse4.h
-endif
-
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
-endif
-
-ifeq ($(CONFIG_EXT_INTRA),yes)
-VP10_COMMON_SRCS-yes += common/intra_filters.h
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/reconintra_sse4.c
-endif
-
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_inv_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_inv_txfm_sse2.h
-
-$(eval $(call rtcd_h_template,vp10_rtcd,av1/common/vp10_rtcd_defs.pl))
diff --git a/av1/vp10_iface_common.h b/av1/vp10_iface_common.h
deleted file mode 100644
index 37a9cc1..0000000
--- a/av1/vp10_iface_common.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-#ifndef VP10_VP10_IFACE_COMMON_H_
-#define VP10_VP10_IFACE_COMMON_H_
-
-#include "aom_ports/mem.h"
-
-static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
- void *user_priv) {
- /** vpx_img_wrap() doesn't allow specifying independent strides for
- * the Y, U, and V planes, nor other alignment adjustments that
- * might be representable by a YV12_BUFFER_CONFIG, so we just
- * initialize all the fields.*/
- int bps;
- if (!yv12->subsampling_y) {
- if (!yv12->subsampling_x) {
- img->fmt = VPX_IMG_FMT_I444;
- bps = 24;
- } else {
- img->fmt = VPX_IMG_FMT_I422;
- bps = 16;
- }
- } else {
- if (!yv12->subsampling_x) {
- img->fmt = VPX_IMG_FMT_I440;
- bps = 16;
- } else {
- img->fmt = VPX_IMG_FMT_I420;
- bps = 12;
- }
- }
- img->cs = yv12->color_space;
- img->range = yv12->color_range;
- img->bit_depth = 8;
- img->w = yv12->y_stride;
- img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VPX_ENC_BORDER_IN_PIXELS, 3);
- img->d_w = yv12->y_crop_width;
- img->d_h = yv12->y_crop_height;
- img->r_w = yv12->render_width;
- img->r_h = yv12->render_height;
- img->x_chroma_shift = yv12->subsampling_x;
- img->y_chroma_shift = yv12->subsampling_y;
- img->planes[VPX_PLANE_Y] = yv12->y_buffer;
- img->planes[VPX_PLANE_U] = yv12->u_buffer;
- img->planes[VPX_PLANE_V] = yv12->v_buffer;
- img->planes[VPX_PLANE_ALPHA] = NULL;
- img->stride[VPX_PLANE_Y] = yv12->y_stride;
- img->stride[VPX_PLANE_U] = yv12->uv_stride;
- img->stride[VPX_PLANE_V] = yv12->uv_stride;
- img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) {
- // vpx_image_t uses byte strides and a pointer to the first byte
- // of the image.
- img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH);
- img->bit_depth = yv12->bit_depth;
- img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
- img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
- img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
- img->planes[VPX_PLANE_ALPHA] = NULL;
- img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride;
- img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride;
- img->stride[VPX_PLANE_V] = 2 * yv12->uv_stride;
- img->stride[VPX_PLANE_ALPHA] = 2 * yv12->y_stride;
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
- img->bps = bps;
- img->user_priv = user_priv;
- img->img_data = yv12->buffer_alloc;
- img->img_data_owner = 0;
- img->self_allocd = 0;
-}
-
-static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
- YV12_BUFFER_CONFIG *yv12) {
- yv12->y_buffer = img->planes[VPX_PLANE_Y];
- yv12->u_buffer = img->planes[VPX_PLANE_U];
- yv12->v_buffer = img->planes[VPX_PLANE_V];
-
- yv12->y_crop_width = img->d_w;
- yv12->y_crop_height = img->d_h;
- yv12->render_width = img->r_w;
- yv12->render_height = img->r_h;
- yv12->y_width = img->d_w;
- yv12->y_height = img->d_h;
-
- yv12->uv_width =
- img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
- yv12->uv_height =
- img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
- yv12->uv_crop_width = yv12->uv_width;
- yv12->uv_crop_height = yv12->uv_height;
-
- yv12->y_stride = img->stride[VPX_PLANE_Y];
- yv12->uv_stride = img->stride[VPX_PLANE_U];
- yv12->color_space = img->cs;
- yv12->color_range = img->range;
-
-#if CONFIG_VP9_HIGHBITDEPTH
- if (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
- // In vpx_image_t
- // planes point to uint8 address of start of data
- // stride counts uint8s to reach next row
- // In YV12_BUFFER_CONFIG
- // y_buffer, u_buffer, v_buffer point to uint16 address of data
- // stride and border counts in uint16s
- // This means that all the address calculations in the main body of code
- // should work correctly.
- // However, before we do any pixel operations we need to cast the address
- // to a uint16 pointer and double its value.
- yv12->y_buffer = CONVERT_TO_BYTEPTR(yv12->y_buffer);
- yv12->u_buffer = CONVERT_TO_BYTEPTR(yv12->u_buffer);
- yv12->v_buffer = CONVERT_TO_BYTEPTR(yv12->v_buffer);
- yv12->y_stride >>= 1;
- yv12->uv_stride >>= 1;
- yv12->flags = YV12_FLAG_HIGHBITDEPTH;
- } else {
- yv12->flags = 0;
- }
- yv12->border = (yv12->y_stride - img->w) / 2;
-#else
- yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
-#endif // CONFIG_VP9_HIGHBITDEPTH
- yv12->subsampling_x = img->x_chroma_shift;
- yv12->subsampling_y = img->y_chroma_shift;
- return VPX_CODEC_OK;
-}
-
-static VPX_REFFRAME ref_frame_to_vp10_reframe(vpx_ref_frame_type_t frame) {
- switch (frame) {
- case VP8_LAST_FRAME: return VPX_LAST_FLAG;
- case VP8_GOLD_FRAME: return VPX_GOLD_FLAG;
- case VP8_ALTR_FRAME: return VPX_ALT_FLAG;
- }
- assert(0 && "Invalid Reference Frame");
- return VPX_LAST_FLAG;
-}
-#endif // VP10_VP10_IFACE_COMMON_H_
diff --git a/av1/vp10cx.mk b/av1/vp10cx.mk
index e4d40c8..463c5f7 100644
--- a/av1/vp10cx.mk
+++ b/av1/vp10cx.mk
@@ -8,140 +8,140 @@
## be found in the AUTHORS file in the root of the source tree.
##
-VP10_CX_EXPORTS += exports_enc
+AV1_CX_EXPORTS += exports_enc
-VP10_CX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_CX_SRCS-no += $(VP10_COMMON_SRCS-no)
-VP10_CX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_CX_SRCS_REMOVE-no += $(VP10_COMMON_SRCS_REMOVE-no)
+AV1_CX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_CX_SRCS-no += $(AV1_COMMON_SRCS-no)
+AV1_CX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_CX_SRCS_REMOVE-no += $(AV1_COMMON_SRCS_REMOVE-no)
-VP10_CX_SRCS-yes += vp10_cx_iface.c
+AV1_CX_SRCS-yes += av1_cx_iface.c
-VP10_CX_SRCS-yes += encoder/bitstream.c
-VP10_CX_SRCS-yes += encoder/bitwriter.h
-VP10_CX_SRCS-yes += encoder/context_tree.c
-VP10_CX_SRCS-yes += encoder/context_tree.h
-VP10_CX_SRCS-yes += encoder/variance_tree.c
-VP10_CX_SRCS-yes += encoder/variance_tree.h
-VP10_CX_SRCS-yes += encoder/cost.h
-VP10_CX_SRCS-yes += encoder/cost.c
-VP10_CX_SRCS-yes += encoder/dct.c
-VP10_CX_SRCS-yes += encoder/hybrid_fwd_txfm.c
-VP10_CX_SRCS-yes += encoder/hybrid_fwd_txfm.h
-VP10_CX_SRCS-yes += encoder/encodeframe.c
-VP10_CX_SRCS-yes += encoder/encodeframe.h
-VP10_CX_SRCS-yes += encoder/encodemb.c
-VP10_CX_SRCS-yes += encoder/encodemv.c
-VP10_CX_SRCS-yes += encoder/ethread.h
-VP10_CX_SRCS-yes += encoder/ethread.c
-VP10_CX_SRCS-yes += encoder/extend.c
-VP10_CX_SRCS-yes += encoder/firstpass.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.h
-VP10_CX_SRCS-yes += encoder/block.h
-VP10_CX_SRCS-yes += encoder/bitstream.h
-VP10_CX_SRCS-yes += encoder/encodemb.h
-VP10_CX_SRCS-yes += encoder/encodemv.h
-VP10_CX_SRCS-yes += encoder/extend.h
-VP10_CX_SRCS-yes += encoder/firstpass.h
-VP10_CX_SRCS-yes += encoder/lookahead.c
-VP10_CX_SRCS-yes += encoder/lookahead.h
-VP10_CX_SRCS-yes += encoder/mcomp.h
-VP10_CX_SRCS-yes += encoder/encoder.h
-VP10_CX_SRCS-yes += encoder/quantize.h
-VP10_CX_SRCS-yes += encoder/ratectrl.h
-VP10_CX_SRCS-yes += encoder/rd.h
-VP10_CX_SRCS-yes += encoder/rdopt.h
-VP10_CX_SRCS-yes += encoder/tokenize.h
-VP10_CX_SRCS-yes += encoder/treewriter.h
-VP10_CX_SRCS-yes += encoder/mcomp.c
-VP10_CX_SRCS-yes += encoder/encoder.c
-VP10_CX_SRCS-yes += encoder/palette.h
-VP10_CX_SRCS-yes += encoder/palette.c
-VP10_CX_SRCS-yes += encoder/picklpf.c
-VP10_CX_SRCS-yes += encoder/picklpf.h
-VP10_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.c
-VP10_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.h
-VP10_CX_SRCS-yes += encoder/quantize.c
-VP10_CX_SRCS-yes += encoder/ratectrl.c
-VP10_CX_SRCS-yes += encoder/rd.c
-VP10_CX_SRCS-yes += encoder/rdopt.c
-VP10_CX_SRCS-yes += encoder/segmentation.c
-VP10_CX_SRCS-yes += encoder/segmentation.h
-VP10_CX_SRCS-yes += encoder/speed_features.c
-VP10_CX_SRCS-yes += encoder/speed_features.h
-VP10_CX_SRCS-yes += encoder/subexp.c
-VP10_CX_SRCS-yes += encoder/subexp.h
-VP10_CX_SRCS-yes += encoder/resize.c
-VP10_CX_SRCS-yes += encoder/resize.h
-VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
-VP10_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.h
-VP10_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.c
+AV1_CX_SRCS-yes += encoder/bitstream.c
+AV1_CX_SRCS-yes += encoder/bitwriter.h
+AV1_CX_SRCS-yes += encoder/context_tree.c
+AV1_CX_SRCS-yes += encoder/context_tree.h
+AV1_CX_SRCS-yes += encoder/variance_tree.c
+AV1_CX_SRCS-yes += encoder/variance_tree.h
+AV1_CX_SRCS-yes += encoder/cost.h
+AV1_CX_SRCS-yes += encoder/cost.c
+AV1_CX_SRCS-yes += encoder/dct.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.h
+AV1_CX_SRCS-yes += encoder/encodeframe.c
+AV1_CX_SRCS-yes += encoder/encodeframe.h
+AV1_CX_SRCS-yes += encoder/encodemb.c
+AV1_CX_SRCS-yes += encoder/encodemv.c
+AV1_CX_SRCS-yes += encoder/ethread.h
+AV1_CX_SRCS-yes += encoder/ethread.c
+AV1_CX_SRCS-yes += encoder/extend.c
+AV1_CX_SRCS-yes += encoder/firstpass.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.h
+AV1_CX_SRCS-yes += encoder/block.h
+AV1_CX_SRCS-yes += encoder/bitstream.h
+AV1_CX_SRCS-yes += encoder/encodemb.h
+AV1_CX_SRCS-yes += encoder/encodemv.h
+AV1_CX_SRCS-yes += encoder/extend.h
+AV1_CX_SRCS-yes += encoder/firstpass.h
+AV1_CX_SRCS-yes += encoder/lookahead.c
+AV1_CX_SRCS-yes += encoder/lookahead.h
+AV1_CX_SRCS-yes += encoder/mcomp.h
+AV1_CX_SRCS-yes += encoder/encoder.h
+AV1_CX_SRCS-yes += encoder/quantize.h
+AV1_CX_SRCS-yes += encoder/ratectrl.h
+AV1_CX_SRCS-yes += encoder/rd.h
+AV1_CX_SRCS-yes += encoder/rdopt.h
+AV1_CX_SRCS-yes += encoder/tokenize.h
+AV1_CX_SRCS-yes += encoder/treewriter.h
+AV1_CX_SRCS-yes += encoder/mcomp.c
+AV1_CX_SRCS-yes += encoder/encoder.c
+AV1_CX_SRCS-yes += encoder/palette.h
+AV1_CX_SRCS-yes += encoder/palette.c
+AV1_CX_SRCS-yes += encoder/picklpf.c
+AV1_CX_SRCS-yes += encoder/picklpf.h
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.c
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.h
+AV1_CX_SRCS-yes += encoder/quantize.c
+AV1_CX_SRCS-yes += encoder/ratectrl.c
+AV1_CX_SRCS-yes += encoder/rd.c
+AV1_CX_SRCS-yes += encoder/rdopt.c
+AV1_CX_SRCS-yes += encoder/segmentation.c
+AV1_CX_SRCS-yes += encoder/segmentation.h
+AV1_CX_SRCS-yes += encoder/speed_features.c
+AV1_CX_SRCS-yes += encoder/speed_features.h
+AV1_CX_SRCS-yes += encoder/subexp.c
+AV1_CX_SRCS-yes += encoder/subexp.h
+AV1_CX_SRCS-yes += encoder/resize.c
+AV1_CX_SRCS-yes += encoder/resize.h
+AV1_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.h
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.c
-VP10_CX_SRCS-yes += encoder/tokenize.c
-VP10_CX_SRCS-yes += encoder/treewriter.c
-VP10_CX_SRCS-yes += encoder/aq_variance.c
-VP10_CX_SRCS-yes += encoder/aq_variance.h
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
-VP10_CX_SRCS-yes += encoder/aq_complexity.c
-VP10_CX_SRCS-yes += encoder/aq_complexity.h
-VP10_CX_SRCS-yes += encoder/temporal_filter.c
-VP10_CX_SRCS-yes += encoder/temporal_filter.h
-VP10_CX_SRCS-yes += encoder/mbgraph.c
-VP10_CX_SRCS-yes += encoder/mbgraph.h
+AV1_CX_SRCS-yes += encoder/tokenize.c
+AV1_CX_SRCS-yes += encoder/treewriter.c
+AV1_CX_SRCS-yes += encoder/aq_variance.c
+AV1_CX_SRCS-yes += encoder/aq_variance.h
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
+AV1_CX_SRCS-yes += encoder/aq_complexity.c
+AV1_CX_SRCS-yes += encoder/aq_complexity.h
+AV1_CX_SRCS-yes += encoder/temporal_filter.c
+AV1_CX_SRCS-yes += encoder/temporal_filter.h
+AV1_CX_SRCS-yes += encoder/mbgraph.c
+AV1_CX_SRCS-yes += encoder/mbgraph.h
ifeq ($(CONFIG_DERING),yes)
-VP10_CX_SRCS-yes += encoder/pickdering.c
+AV1_CX_SRCS-yes += encoder/pickdering.c
endif
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
endif
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
ifeq ($(ARCH_X86_64),yes)
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
endif
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_intrin_sse2.c
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/highbd_fwd_txfm_sse4.c
-VP10_CX_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_inv_txfm_sse4.c
-VP10_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp10_highbd_quantize_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/highbd_fwd_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_inv_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/av1_highbd_quantize_sse4.c
 endif
 ifeq ($(CONFIG_EXT_INTER),yes)
-VP10_CX_SRCS-yes += encoder/wedge_utils.c
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
+AV1_CX_SRCS-yes += encoder/wedge_utils.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
 endif
-VP10_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
+AV1_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
 endif
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
-VP10_CX_SRCS-yes := $(filter-out $(VP10_CX_SRCS_REMOVE-yes),$(VP10_CX_SRCS-yes))
+AV1_CX_SRCS-yes := $(filter-out $(AV1_CX_SRCS_REMOVE-yes),$(AV1_CX_SRCS-yes))
diff --git a/av1/vp10dx.mk b/av1/vp10dx.mk
deleted file mode 100644
index ae68475..0000000
--- a/av1/vp10dx.mk
+++ /dev/null
@@ -1,34 +0,0 @@
-##
-## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-##
-## Use of this source code is governed by a BSD-style license
-## that can be found in the LICENSE file in the root of the source
-## tree. An additional intellectual property rights grant can be found
-## in the file PATENTS. All contributing project authors may
-## be found in the AUTHORS file in the root of the source tree.
-##
-
-VP10_DX_EXPORTS += exports_dec
-
-VP10_DX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_DX_SRCS-no += $(VP10_COMMON_SRCS-no)
-VP10_DX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_DX_SRCS_REMOVE-no += $(VP10_COMMON_SRCS_REMOVE-no)
-
-VP10_DX_SRCS-yes += vp10_dx_iface.c
-
-VP10_DX_SRCS-yes += decoder/decodemv.c
-VP10_DX_SRCS-yes += decoder/decodeframe.c
-VP10_DX_SRCS-yes += decoder/decodeframe.h
-VP10_DX_SRCS-yes += decoder/detokenize.c
-VP10_DX_SRCS-yes += decoder/decodemv.h
-VP10_DX_SRCS-yes += decoder/detokenize.h
-VP10_DX_SRCS-yes += decoder/dthread.c
-VP10_DX_SRCS-yes += decoder/dthread.h
-VP10_DX_SRCS-yes += decoder/decoder.c
-VP10_DX_SRCS-yes += decoder/decoder.h
-VP10_DX_SRCS-yes += decoder/dsubexp.c
-VP10_DX_SRCS-yes += decoder/dsubexp.h
-VP10_DX_SRCS-yes += decoder/bitreader.h
-
-VP10_DX_SRCS-yes := $(filter-out $(VP10_DX_SRCS_REMOVE-yes),$(VP10_DX_SRCS-yes))
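
Note on the build-list idiom above (an illustrative sketch, not part of the patch): appending a file to AV1_CX_SRCS-yes includes it unconditionally, while appending to AV1_CX_SRCS-$(CONFIG_X) or AV1_CX_SRCS-$(HAVE_X) includes it only when the configure step expands that flag to "yes"; a "no" flag routes the file into the unused ..._SRCS-no list, and the closing filter-out line subtracts anything queued in ..._SRCS_REMOVE-yes. A minimal self-contained Makefile showing the same mechanics, using a hypothetical DEMO_SRCS list with flag values hard-coded for illustration:

    # Sketch only: DEMO_SRCS is hypothetical; CONFIG_*/HAVE_* values normally
    # come from the configure step and are hard-coded here for illustration.
    CONFIG_EXT_INTER := yes
    HAVE_SSE2        := no

    DEMO_SRCS-yes                 += encoder/encoder.c
    DEMO_SRCS-$(CONFIG_EXT_INTER) += encoder/wedge_utils.c       # flag is yes: included
    DEMO_SRCS-$(HAVE_SSE2)        += encoder/x86/quantize_sse2.c # lands in DEMO_SRCS-no
    DEMO_SRCS_REMOVE-yes          += encoder/encoder.c           # subtracted below

    DEMO_SRCS-yes := $(filter-out $(DEMO_SRCS_REMOVE-yes),$(DEMO_SRCS-yes))
    $(info DEMO_SRCS-yes = $(DEMO_SRCS-yes))  # prints: encoder/wedge_utils.c

    all: ; @:  # no-op default target so plain 'make' runs the sketch cleanly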