Change "vp10" to "av1" in source code
Change-Id: Ifd0d08b97538dcc04227eceb6fb087224c760c59
diff --git a/aom/aom_codec.mk b/aom/aom_codec.mk
index 8ac6033..4f385c6 100644
--- a/aom/aom_codec.mk
+++ b/aom/aom_codec.mk
@@ -12,14 +12,14 @@
API_EXPORTS += exports
-API_SRCS-$(CONFIG_VP10_ENCODER) += vp8.h
+API_SRCS-$(CONFIG_AV1_ENCODER) += vp8.h
-API_SRCS-$(CONFIG_VP10_ENCODER) += vp8cx.h
-API_DOC_SRCS-$(CONFIG_VP10_ENCODER) += vp8.h
-API_DOC_SRCS-$(CONFIG_VP10_ENCODER) += vp8cx.h
+API_SRCS-$(CONFIG_AV1_ENCODER) += vp8cx.h
+API_DOC_SRCS-$(CONFIG_AV1_ENCODER) += vp8.h
+API_DOC_SRCS-$(CONFIG_AV1_ENCODER) += vp8cx.h
-API_SRCS-$(CONFIG_VP10_DECODER) += vp8.h
-API_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h
-API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8.h
-API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h
+API_SRCS-$(CONFIG_AV1_DECODER) += vp8.h
+API_SRCS-$(CONFIG_AV1_DECODER) += vp8dx.h
+API_DOC_SRCS-$(CONFIG_AV1_DECODER) += vp8.h
+API_DOC_SRCS-$(CONFIG_AV1_DECODER) += vp8dx.h
API_DOC_SRCS-yes += aom_codec.h
API_DOC_SRCS-yes += aom_decoder.h
diff --git a/aom/vp8cx.h b/aom/vp8cx.h
index 882d084..e3cd331 100644
--- a/aom/vp8cx.h
+++ b/aom/vp8cx.h
@@ -28,13 +28,13 @@
extern "C" {
#endif
-/*!\name Algorithm interface for VP10
+/*!\name Algorithm interface for AV1
*
- * This interface provides the capability to encode raw VP10 streams.
+ * This interface provides the capability to encode raw AV1 streams.
* @{
*/
-extern aom_codec_iface_t aom_codec_vp10_cx_algo;
-extern aom_codec_iface_t *aom_codec_vp10_cx(void);
+extern aom_codec_iface_t aom_codec_av1_cx_algo;
+extern aom_codec_iface_t *aom_codec_av1_cx(void);
/*!@} - end algorithm interface member group*/
/*
diff --git a/aom/vp8dx.h b/aom/vp8dx.h
index 307d217..9efbc8c 100644
--- a/aom/vp8dx.h
+++ b/aom/vp8dx.h
@@ -28,13 +28,13 @@
/* Include controls common to both the encoder and decoder */
#include "./vp8.h"
-/*!\name Algorithm interface for VP10
+/*!\name Algorithm interface for AV1
*
- * This interface provides the capability to decode VP10 streams.
+ * This interface provides the capability to decode AV1 streams.
* @{
*/
-extern aom_codec_iface_t aom_codec_vp10_dx_algo;
-extern aom_codec_iface_t *aom_codec_vp10_dx(void);
+extern aom_codec_iface_t aom_codec_av1_dx_algo;
+extern aom_codec_iface_t *aom_codec_av1_dx(void);
/*!@} - end algorithm interface member group*/
/*!\enum vp8_dec_control_id
diff --git a/aom_dsp/aom_convolve.c b/aom_dsp/aom_convolve.c
index 233e850..d74957b 100644
--- a/aom_dsp/aom_convolve.c
+++ b/aom_dsp/aom_convolve.c
@@ -329,7 +329,7 @@
filter_y, y_step_q4, w, h);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
uint8_t *dst8, ptrdiff_t dst_stride,
const InterpKernel *x_filters, int x0_q4,
diff --git a/aom_dsp/aom_convolve.h b/aom_dsp/aom_convolve.h
index 1fc4af4..6f17c4a 100644
--- a/aom_dsp/aom_convolve.h
+++ b/aom_dsp/aom_convolve.h
@@ -24,7 +24,7 @@
const int16_t *filter_y, int y_step_q4, int w,
int h);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*highbd_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
diff --git a/aom_dsp/aom_dsp.mk b/aom_dsp/aom_dsp.mk
index fcc04c2..2aea32c 100644
--- a/aom_dsp/aom_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -45,12 +45,12 @@
DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
endif # CONFIG_USE_X86INC
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
ifeq ($(CONFIG_USE_X86INC),yes)
DSP_SRCS-$(HAVE_SSE) += x86/highbd_intrapred_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_intrapred_sse2.asm
endif # CONFIG_USE_X86INC
-endif # CONFIG_VPX_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
DSP_SRCS-$(HAVE_NEON_ASM) += arm/intrapred_neon_asm$(ASM)
DSP_SRCS-$(HAVE_NEON) += arm/intrapred_neon.c
@@ -75,7 +75,7 @@
DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_bilinear_ssse3.asm
DSP_SRCS-$(HAVE_AVX2) += x86/aom_subpixel_8t_intrin_avx2.c
DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_intrin_ssse3.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/aom_high_subpixel_8t_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/aom_high_subpixel_bilinear_sse2.asm
endif
@@ -156,15 +156,15 @@
DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_horiz_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_vert_dspr2.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_loopfilter_sse2.c
-endif # CONFIG_VPX_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
DSP_SRCS-yes += txfm_common.h
DSP_SRCS-$(HAVE_SSE2) += x86/txfm_common_sse2.h
DSP_SRCS-$(HAVE_MSA) += mips/txfm_macros_msa.h
# forward transform
-ifneq ($(filter yes,$(CONFIG_VP10_ENCODER)),)
+ifneq ($(filter yes,$(CONFIG_AV1_ENCODER)),)
DSP_SRCS-yes += fwd_txfm.c
DSP_SRCS-yes += fwd_txfm.h
DSP_SRCS-$(HAVE_SSE2) += x86/fwd_txfm_sse2.h
@@ -182,10 +182,10 @@
DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.h
DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.c
DSP_SRCS-$(HAVE_MSA) += mips/fwd_dct32x32_msa.c
-endif # CONFIG_VP10_ENCODER
+endif # CONFIG_AV1_ENCODER
# inverse transform
-ifneq ($(filter yes,$(CONFIG_VP10)),)
+ifneq ($(filter yes,$(CONFIG_AV1)),)
DSP_SRCS-yes += inv_txfm.h
DSP_SRCS-yes += inv_txfm.c
DSP_SRCS-$(HAVE_SSE2) += x86/inv_txfm_sse2.h
@@ -227,23 +227,23 @@
DSP_SRCS-$(HAVE_MSA) += mips/idct16x16_msa.c
DSP_SRCS-$(HAVE_MSA) += mips/idct32x32_msa.c
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_DSPR2) += mips/inv_txfm_dspr2.h
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans4_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans8_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans16_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c
-endif # CONFIG_VPX_HIGHBITDEPTH
-endif # CONFIG_VP10
+endif # CONFIG_AOM_HIGHBITDEPTH
+endif # CONFIG_AV1
# quantization
-ifneq ($(filter yes, $(CONFIG_VP10_ENCODER)),)
+ifneq ($(filter yes, $(CONFIG_AV1_ENCODER)),)
DSP_SRCS-yes += quantize.c
DSP_SRCS-yes += quantize.h
DSP_SRCS-$(HAVE_SSE2) += x86/quantize_sse2.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_quantize_intrin_sse2.c
endif
ifeq ($(ARCH_X86_64),yes)
@@ -264,7 +264,7 @@
endif
endif
-endif # CONFIG_VP10_ENCODER
+endif # CONFIG_AV1_ENCODER
ifeq ($(CONFIG_ENCODERS),yes)
DSP_SRCS-yes += sad.c
@@ -292,10 +292,10 @@
DSP_SRCS-$(HAVE_SSE2) += x86/sad_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/subtract_sse2.asm
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad_sse2.asm
-endif # CONFIG_VPX_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
endif # CONFIG_USE_X86INC
endif # CONFIG_ENCODERS
@@ -334,13 +334,13 @@
DSP_SRCS-$(HAVE_SSE2) += x86/subpel_variance_sse2.asm # Contains SSE2 and SSSE3
endif # CONFIG_USE_X86INC
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_impl_sse2.asm
ifeq ($(CONFIG_USE_X86INC),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_subpel_variance_impl_sse2.asm
endif # CONFIG_USE_X86INC
-endif # CONFIG_VPX_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
endif # CONFIG_ENCODERS
DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes)
diff --git a/aom_dsp/aom_dsp_common.h b/aom_dsp/aom_dsp_common.h
index 54a3c32..2372049 100644
--- a/aom_dsp/aom_dsp_common.h
+++ b/aom_dsp/aom_dsp_common.h
@@ -29,7 +29,7 @@
#define AOM_QM_BITS 6
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Note:
// tran_low_t is the datatype used for final transform coefficients.
// tran_high_t is the datatype used for intermediate transform stages.
@@ -41,7 +41,7 @@
// tran_high_t is the datatype used for intermediate transform stages.
typedef int32_t tran_high_t;
typedef int16_t tran_low_t;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static INLINE uint8_t clip_pixel(int val) {
return (val > 255) ? 255 : (val < 0) ? 0 : val;
@@ -55,7 +55,7 @@
return value < low ? low : (value > high ? high : value);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
switch (bd) {
case 8:
@@ -64,7 +64,7 @@
case 12: return (uint16_t)clamp(val, 0, 4095);
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 2c504a9..a01cd96 100644
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -256,7 +256,7 @@
specialize qw/aom_dc_128_predictor_32x32 msa neon/, "$sse2_x86inc";
# High bitdepth functions
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/aom_highbd_d207_predictor_4x4/;
@@ -448,7 +448,7 @@
add_proto qw/void aom_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/aom_highbd_dc_128_predictor_32x32/;
-} # CONFIG_VPX_HIGHBITDEPTH
+} # CONFIG_AOM_HIGHBITDEPTH
#
# Sub Pixel Filters
@@ -495,7 +495,7 @@
add_proto qw/void aom_scaled_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
specialize qw/aom_scaled_avg_vert/;
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
#
# Sub Pixel Filters
#
@@ -522,7 +522,7 @@
add_proto qw/void aom_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
specialize qw/aom_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-} # CONFIG_VPX_HIGHBITDEPTH
+} # CONFIG_AOM_HIGHBITDEPTH
#
# Loopfilter
@@ -565,7 +565,7 @@
add_proto qw/void aom_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
specialize qw/aom_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
specialize qw/aom_highbd_lpf_vertical_16 sse2/;
@@ -598,7 +598,7 @@
add_proto qw/void aom_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
specialize qw/aom_highbd_lpf_horizontal_4_dual sse2/;
-} # CONFIG_VPX_HIGHBITDEPTH
+} # CONFIG_AOM_HIGHBITDEPTH
#
# Encoder functions.
@@ -607,8 +607,8 @@
#
# Forward transform
#
-if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct4x4 sse2/;
@@ -686,13 +686,13 @@
add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct32x32_1 sse2 msa/;
-} # CONFIG_VPX_HIGHBITDEPTH
-} # CONFIG_VP10_ENCODER
+} # CONFIG_AOM_HIGHBITDEPTH
+} # CONFIG_AV1_ENCODER
#
# Inverse transform
-if (aom_config("CONFIG_VP10") eq "yes") {
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AV1") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
# Note as optimized versions of these functions are added we need to add a check to ensure
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
@@ -925,40 +925,40 @@
add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
specialize qw/aom_iwht4x4_16_add msa/, "$sse2_x86inc";
} # CONFIG_EMULATE_HARDWARE
-} # CONFIG_VPX_HIGHBITDEPTH
-} # CONFIG_VP10
+} # CONFIG_AOM_HIGHBITDEPTH
+} # CONFIG_AV1
#
# Quantization
#
if (aom_config("CONFIG_AOM_QM") eq "yes") {
- if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- } # CONFIG_VPX_HIGHBITDEPTH
- } # CONFIG_VP10_ENCODER
+ } # CONFIG_AOM_HIGHBITDEPTH
+ } # CONFIG_AV1_ENCODER
} else {
- if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/aom_quantize_b sse2/, "$ssse3_x86_64_x86inc", "$avx_x86_64_x86inc";
add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/aom_quantize_b_32x32/, "$ssse3_x86_64_x86inc", "$avx_x86_64_x86inc";
- if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/aom_highbd_quantize_b sse2/;
add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/aom_highbd_quantize_b_32x32 sse2/;
- } # CONFIG_VPX_HIGHBITDEPTH
- } # CONFIG_VP10_ENCODER
+ } # CONFIG_AOM_HIGHBITDEPTH
+ } # CONFIG_AV1_ENCODER
} # CONFIG_AOM_QM
if (aom_config("CONFIG_ENCODERS") eq "yes") {
@@ -1013,7 +1013,7 @@
#
# Avg
#
-if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/unsigned int aom_avg_8x8/, "const uint8_t *, int p";
specialize qw/aom_avg_8x8 sse2 neon msa/;
@@ -1040,7 +1040,7 @@
add_proto qw/int aom_vector_var/, "int16_t const *ref, int16_t const *src, const int bwl";
specialize qw/aom_vector_var neon sse2/;
-} # CONFIG_VP10_ENCODER
+} # CONFIG_AV1_ENCODER
add_proto qw/unsigned int aom_sad64x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
specialize qw/aom_sad64x64_avg avx2 msa/, "$sse2_x86inc";
@@ -1187,7 +1187,7 @@
specialize qw/aom_ssim_parms_16x16/, "$sse2_x86_64";
}
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
#
# Block subtraction
#
@@ -1387,7 +1387,7 @@
add_proto qw/void aom_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
specialize qw/aom_highbd_ssim_parms_8x8/;
}
-} # CONFIG_VPX_HIGHBITDEPTH
+} # CONFIG_AOM_HIGHBITDEPTH
} # CONFIG_ENCODERS
if (aom_config("CONFIG_ENCODERS") eq "yes") {
@@ -1556,7 +1556,7 @@
add_proto qw/uint32_t aom_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/aom_variance_halfpixvar16x16_hv mmx sse2 media/;
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
add_proto qw/unsigned int aom_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/aom_highbd_12_variance64x64 sse2/;
@@ -1913,7 +1913,7 @@
add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-} # CONFIG_VPX_HIGHBITDEPTH
+} # CONFIG_AOM_HIGHBITDEPTH
} # CONFIG_ENCODERS
1;
diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c
index bbdb090..18b5474 100644
--- a/aom_dsp/avg.c
+++ b/aom_dsp/avg.c
@@ -189,7 +189,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
unsigned int aom_highbd_avg_8x8_c(const uint8_t *s8, int p) {
int i, j;
int sum = 0;
@@ -227,4 +227,4 @@
}
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/fwd_txfm.c b/aom_dsp/fwd_txfm.c
index 68409f9..4aa448a 100644
--- a/aom_dsp/fwd_txfm.c
+++ b/aom_dsp/fwd_txfm.c
@@ -770,7 +770,7 @@
output[1] = 0;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
int stride) {
aom_fdct4x4_c(input, output, stride);
@@ -809,4 +809,4 @@
int stride) {
aom_fdct32x32_1_c(input, out, stride);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/intrapred.c b/aom_dsp/intrapred.c
index e75faa6..83d1039 100644
--- a/aom_dsp/intrapred.c
+++ b/aom_dsp/intrapred.c
@@ -490,7 +490,7 @@
DST(1, 3) = AVG3(L, K, J);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
int bs, const uint16_t *above,
const uint16_t *left, int bd) {
@@ -765,7 +765,7 @@
dst += stride;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// This serves as a wrapper function, so that all the prediction functions
// can be unified and accessed as a pointer array. Note that the boundary
@@ -777,7 +777,7 @@
type##_predictor(dst, stride, size, above, left); \
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define intra_pred_highbd_sized(type, size) \
void aom_highbd_##type##_predictor_##size##x##size##_c( \
uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
@@ -806,7 +806,7 @@
#define intra_pred_no_4x4(type) \
intra_pred_sized(type, 8) intra_pred_sized(type, 16) \
intra_pred_sized(type, 32)
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* clang-format off */
intra_pred_no_4x4(d207)
diff --git a/aom_dsp/inv_txfm.c b/aom_dsp/inv_txfm.c
index 0e40949..110ba26 100644
--- a/aom_dsp/inv_txfm.c
+++ b/aom_dsp/inv_txfm.c
@@ -1251,7 +1251,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
@@ -2487,4 +2487,4 @@
dest += stride;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/inv_txfm.h b/aom_dsp/inv_txfm.h
index c071670..e36b35f 100644
--- a/aom_dsp/inv_txfm.h
+++ b/aom_dsp/inv_txfm.h
@@ -41,7 +41,7 @@
return check_range(rv);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE tran_low_t highbd_check_range(tran_high_t input, int bd) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
// For valid highbitdepth streams, intermediate stage coefficients will
@@ -64,7 +64,7 @@
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
return highbd_check_range(rv, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EMULATE_HARDWARE
// When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -96,7 +96,7 @@
void iadst8_c(const tran_low_t *input, tran_low_t *output);
void iadst16_c(const tran_low_t *input, tran_low_t *output);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
diff --git a/aom_dsp/loopfilter.c b/aom_dsp/loopfilter.c
index da9ea91..e43ebe8 100644
--- a/aom_dsp/loopfilter.c
+++ b/aom_dsp/loopfilter.c
@@ -19,7 +19,7 @@
return (int8_t)clamp(t, -128, 127);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE int16_t signed_char_clamp_high(int t, int bd) {
switch (bd) {
case 10: return (int16_t)clamp(t, -128 * 4, 128 * 4 - 1);
@@ -342,7 +342,7 @@
mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Should we apply any filter at all: 11111111 yes, 00000000 no ?
static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit,
uint16_t p3, uint16_t p2, uint16_t p1,
@@ -706,4 +706,4 @@
const uint8_t *thresh, int bd) {
highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/quantize.c b/aom_dsp/quantize.c
index 2a194c6..1b9bbdc 100644
--- a/aom_dsp/quantize.c
+++ b/aom_dsp/quantize.c
@@ -40,7 +40,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -99,7 +99,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr,
@@ -192,7 +192,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -316,7 +316,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_b_32x32_c(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -400,7 +400,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -450,7 +450,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr,
@@ -527,7 +527,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -632,7 +632,7 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_b_32x32_c(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
diff --git a/aom_dsp/quantize.h b/aom_dsp/quantize.h
index cb941b5..ffb158d 100644
--- a/aom_dsp/quantize.h
+++ b/aom_dsp/quantize.h
@@ -38,7 +38,7 @@
uint16_t *eob_ptr, const int16_t *scan,
const int16_t *iscan, const qm_val_t *qm_ptr,
const qm_val_t *iqm_ptr);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
@@ -75,7 +75,7 @@
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
uint16_t *eob_ptr, const int16_t *scan,
const int16_t *iscan);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
diff --git a/aom_dsp/sad.c b/aom_dsp/sad.c
index a94876a..2945a4a 100644
--- a/aom_dsp/sad.c
+++ b/aom_dsp/sad.c
@@ -54,7 +54,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
int width, int height, const uint8_t *ref8,
int ref_stride) {
@@ -71,7 +71,7 @@
ref += ref_stride;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#define sadMxN(m, n) \
unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
@@ -179,7 +179,7 @@
sadMxNx4D(4, 4)
/* clang-format on */
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE
unsigned int highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8,
int b_stride, int width, int height) {
@@ -317,4 +317,4 @@
highbd_sadMxNx4D(4, 4)
/* clang-format on */
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/ssim.c b/aom_dsp/ssim.c
index c208070..52e263e 100644
--- a/aom_dsp/ssim.c
+++ b/aom_dsp/ssim.c
@@ -45,7 +45,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
int rp, uint32_t *sum_s, uint32_t *sum_r,
uint32_t *sum_sq_s, uint32_t *sum_sq_r,
@@ -61,7 +61,7 @@
}
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static const int64_t cc1 = 26634; // (64^2*(.01*255)^2
static const int64_t cc2 = 239708; // (64^2*(.03*255)^2
@@ -92,7 +92,7 @@
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r,
int rp, unsigned int bd) {
uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
@@ -102,7 +102,7 @@
return similarity(sum_s >> oshift, sum_r >> oshift, sum_sq_s >> (2 * oshift),
sum_sq_r >> (2 * oshift), sum_sxr >> (2 * oshift), 64);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// We are using a 8x8 moving window with starting location of each 8x8 window
// on the 4x4 pixel grid. Such arrangement allows the windows to overlap
@@ -127,7 +127,7 @@
return ssim_total;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static double aom_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
int stride_img1, int stride_img2, int width,
int height, unsigned int bd) {
@@ -149,7 +149,7 @@
ssim_total /= samples;
return ssim_total;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
double aom_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight) {
@@ -436,7 +436,7 @@
return inconsistency_total;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight,
unsigned int bd) {
@@ -486,4 +486,4 @@
return ssim_all;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/ssim.h b/aom_dsp/ssim.h
index afe9d9a..0b4d8f4 100644
--- a/aom_dsp/ssim.h
+++ b/aom_dsp/ssim.h
@@ -80,7 +80,7 @@
const YV12_BUFFER_CONFIG *dest, double *ssim_y,
double *ssim_u, double *ssim_v);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight,
unsigned int bd);
@@ -88,7 +88,7 @@
double aom_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *ssim_y,
double *ssim_u, double *ssim_v, unsigned int bd);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
diff --git a/aom_dsp/subtract.c b/aom_dsp/subtract.c
index 3890d46..da526c4 100644
--- a/aom_dsp/subtract.c
+++ b/aom_dsp/subtract.c
@@ -32,7 +32,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
ptrdiff_t diff_stride, const uint8_t *src8,
ptrdiff_t src_stride, const uint8_t *pred8,
@@ -52,4 +52,4 @@
src += src_stride;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/variance.c b/aom_dsp/variance.c
index 3367538..bf97a6b 100644
--- a/aom_dsp/variance.c
+++ b/aom_dsp/variance.c
@@ -257,7 +257,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w, int h,
uint64_t *sse, uint64_t *sum) {
@@ -573,4 +573,4 @@
ref += ref_stride;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/variance.h b/aom_dsp/variance.h
index 81966fc..8bd10dd 100644
--- a/aom_dsp/variance.h
+++ b/aom_dsp/variance.h
@@ -54,7 +54,7 @@
const uint8_t *a_ptr, int a_stride, int xoffset, int yoffset,
const uint8_t *b_ptr, int b_stride, unsigned int *sse,
const uint8_t *second_pred);
-#if CONFIG_VP10
+#if CONFIG_AV1
typedef struct aom_variance_vtable {
aom_sad_fn_t sdf;
aom_sad_avg_fn_t sdaf;
@@ -65,7 +65,7 @@
aom_sad_multi_fn_t sdx8f;
aom_sad_multi_d_fn_t sdx4df;
} aom_variance_fn_ptr_t;
-#endif // CONFIG_VP10
+#endif // CONFIG_AV1
#ifdef __cplusplus
} // extern "C"
diff --git a/aom_dsp/x86/aom_asm_stubs.c b/aom_dsp/x86/aom_asm_stubs.c
index be8cba5..1b71a9f 100644
--- a/aom_dsp/x86/aom_asm_stubs.c
+++ b/aom_dsp/x86/aom_asm_stubs.c
@@ -78,7 +78,7 @@
FUN_CONV_2D(, sse2);
FUN_CONV_2D(avg_, sse2);
-#if CONFIG_VPX_HIGHBITDEPTH && ARCH_X86_64
+#if CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_sse2;
highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_sse2;
highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_sse2;
@@ -159,5 +159,5 @@
// int w, int h, int bd);
HIGH_FUN_CONV_2D(, sse2);
HIGH_FUN_CONV_2D(avg_, sse2);
-#endif // CONFIG_VPX_HIGHBITDEPTH && ARCH_X86_64
+#endif // CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
#endif // HAVE_SSE2
diff --git a/aom_dsp/x86/aom_convolve_copy_sse2.asm b/aom_dsp/x86/aom_convolve_copy_sse2.asm
index eb66e84..4182c19 100644
--- a/aom_dsp/x86/aom_convolve_copy_sse2.asm
+++ b/aom_dsp/x86/aom_convolve_copy_sse2.asm
@@ -222,7 +222,7 @@
INIT_XMM sse2
convolve_fn copy
convolve_fn avg
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
convolve_fn copy, highbd
convolve_fn avg, highbd
%endif
diff --git a/aom_dsp/x86/convolve.h b/aom_dsp/x86/convolve.h
index cf43654..dd453b0 100644
--- a/aom_dsp/x86/convolve.h
+++ b/aom_dsp/x86/convolve.h
@@ -104,7 +104,7 @@
} \
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
const ptrdiff_t src_pitch,
@@ -206,6 +206,6 @@
w, h, bd); \
} \
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // VPX_DSP_X86_CONVOLVE_H_
diff --git a/aom_dsp/x86/fwd_txfm_sse2.c b/aom_dsp/x86/fwd_txfm_sse2.c
index 2afb212..4dcc67c 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.c
+++ b/aom_dsp/x86/fwd_txfm_sse2.c
@@ -247,7 +247,7 @@
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_HIGH_BIT_DEPTH 1
#define FDCT4x4_2D aom_highbd_fdct4x4_sse2
#define FDCT8x8_2D aom_highbd_fdct8x8_sse2
@@ -269,4 +269,4 @@
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/fwd_txfm_sse2.h b/aom_dsp/x86/fwd_txfm_sse2.h
index 6149938..8e1a007 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.h
+++ b/aom_dsp/x86/fwd_txfm_sse2.h
@@ -245,7 +245,7 @@
}
static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const __m128i zero = _mm_setzero_si128();
const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
__m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -254,11 +254,11 @@
_mm_store_si128((__m128i *)(dst_ptr + 4), out1);
#else
_mm_store_si128((__m128i *)(dst_ptr), *poutput);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const __m128i zero = _mm_setzero_si128();
const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
__m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -267,7 +267,7 @@
_mm_storeu_si128((__m128i *)(dst_ptr + 4), out1);
#else
_mm_storeu_si128((__m128i *)(dst_ptr), *poutput);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
static INLINE __m128i mult_round_shift(const __m128i *pin0, const __m128i *pin1,
diff --git a/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
index 975dde7..598f3d2 100644
--- a/aom_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -15,7 +15,7 @@
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 1a8359f..548929d 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -3473,7 +3473,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
__m128i ubounded, retval;
const __m128i zero = _mm_set1_epi16(0);
@@ -4035,4 +4035,4 @@
}
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index ddb680f..dbe233d 100644
--- a/aom_dsp/x86/inv_txfm_sse2.h
+++ b/aom_dsp/x86/inv_txfm_sse2.h
@@ -94,7 +94,7 @@
// Function to allow 8 bit optimisations to be used when profile 0 is used with
// highbitdepth enabled
static INLINE __m128i load_input_data(const tran_low_t *data) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
data[6], data[7]);
#else
diff --git a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
index a835161..3890926 100644
--- a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
+++ b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
@@ -220,7 +220,7 @@
mova m12, [pw_11585x2]
lea r3, [2 * strideq]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [inputq + 0]
packssdw m0, [inputq + 16]
mova m1, [inputq + 32]
@@ -271,7 +271,7 @@
lea r3, [2 * strideq]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [inputq + 0]
packssdw m0, [inputq + 16]
mova m1, [inputq + 32]
@@ -793,7 +793,7 @@
lea r4, [rsp + transposed_in]
idct32x32_34_transpose:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [r3 + 0]
packssdw m0, [r3 + 16]
mova m1, [r3 + 32 * 4]
@@ -1223,7 +1223,7 @@
mov r7, 2
idct32x32_135_transpose:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [r3 + 0]
packssdw m0, [r3 + 16]
mova m1, [r3 + 32 * 4]
@@ -1261,7 +1261,7 @@
mova [r4 + 16 * 6], m6
mova [r4 + 16 * 7], m7
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
add r3, 32
%else
add r3, 16
@@ -1272,7 +1272,7 @@
IDCT32X32_135 16*0, 16*32, 16*64, 16*96
lea stp, [stp + 16 * 8]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea inputq, [inputq + 32 * 32]
%else
lea inputq, [inputq + 16 * 32]
@@ -1687,7 +1687,7 @@
mov r7, 4
idct32x32_1024_transpose:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [r3 + 0]
packssdw m0, [r3 + 16]
mova m1, [r3 + 32 * 4]
@@ -1725,7 +1725,7 @@
mova [r4 + 16 * 5], m5
mova [r4 + 16 * 6], m6
mova [r4 + 16 * 7], m7
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
add r3, 32
%else
add r3, 16
@@ -1737,7 +1737,7 @@
IDCT32X32_1024 16*0, 16*32, 16*64, 16*96
lea stp, [stp + 16 * 8]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea inputq, [inputq + 32 * 32]
%else
lea inputq, [inputq + 16 * 32]
diff --git a/aom_dsp/x86/inv_wht_sse2.asm b/aom_dsp/x86/inv_wht_sse2.asm
index eec5047..ee80563 100644
--- a/aom_dsp/x86/inv_wht_sse2.asm
+++ b/aom_dsp/x86/inv_wht_sse2.asm
@@ -82,7 +82,7 @@
INIT_XMM sse2
cglobal iwht4x4_16_add, 3, 3, 7, input, output, stride
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [inputq + 0]
packssdw m0, [inputq + 16]
mova m1, [inputq + 32]
diff --git a/aom_dsp/x86/quantize_avx_x86_64.asm b/aom_dsp/x86/quantize_avx_x86_64.asm
index b3d78c4..b74d6ea 100644
--- a/aom_dsp/x86/quantize_avx_x86_64.asm
+++ b/aom_dsp/x86/quantize_avx_x86_64.asm
@@ -41,7 +41,7 @@
mova m0, [zbinq] ; m0 = zbin
; Get DC and first 15 AC coeffs - in this special case, that is all.
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; coeff stored as 32bit numbers but we process them as 16 bit numbers
mova m9, [coeffq]
packssdw m9, [coeffq+16] ; m9 = c[i]
@@ -73,7 +73,7 @@
ptest m14, m14
jnz .single_nonzero
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [r1 ], ymm5
mova [r1+32], ymm5
mova [r2 ], ymm5
@@ -121,7 +121,7 @@
pand m8, m7
pand m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -142,7 +142,7 @@
punpckhqdq m3, m3
pmullw m13, m3 ; dqc[i] = qc[i] * q
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -226,7 +226,7 @@
DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea coeffq, [ coeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -239,7 +239,7 @@
neg ncoeffq
; get DC and first 15 AC coeffs
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; coeff stored as 32bit numbers & require 16bit numbers
mova m9, [coeffq+ncoeffq*4+ 0]
packssdw m9, [coeffq+ncoeffq*4+16]
@@ -261,7 +261,7 @@
ptest m14, m14
jnz .first_nonzero
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4 ], ymm5
mova [qcoeffq+ncoeffq*4+32], ymm5
mova [dqcoeffq+ncoeffq*4 ], ymm5
@@ -299,7 +299,7 @@
pand m8, m7
pand m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -330,7 +330,7 @@
psignw m13, m10
%endif
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -360,7 +360,7 @@
.ac_only_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; pack coeff from 32bit to 16bit array
mova m9, [coeffq+ncoeffq*4+ 0]
packssdw m9, [coeffq+ncoeffq*4+16]
@@ -382,7 +382,7 @@
ptest m14, m14
jnz .rest_nonzero
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4+ 0], ymm5
mova [qcoeffq+ncoeffq*4+32], ymm5
mova [dqcoeffq+ncoeffq*4+ 0], ymm5
@@ -421,7 +421,7 @@
pand m14, m7
pand m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m14
punpckhwd m6, m14, m6
@@ -451,7 +451,7 @@
psignw m13, m10
%endif
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m14
punpckhwd m6, m14, m6
@@ -507,7 +507,7 @@
DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
%else
@@ -519,7 +519,7 @@
pxor m7, m7
.blank_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [dqcoeffq+ncoeffq*4+ 0], ymm7
mova [dqcoeffq+ncoeffq*4+32], ymm7
mova [qcoeffq+ncoeffq*4+ 0], ymm7
diff --git a/aom_dsp/x86/quantize_sse2.c b/aom_dsp/x86/quantize_sse2.c
index 3a2655f..39ce529 100644
--- a/aom_dsp/x86/quantize_sse2.c
+++ b/aom_dsp/x86/quantize_sse2.c
@@ -16,7 +16,7 @@
#include "aom/aom_integer.h"
static INLINE __m128i load_coefficients(const tran_low_t* coeff_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
return _mm_setr_epi16((int16_t)coeff_ptr[0], (int16_t)coeff_ptr[1],
(int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3],
(int16_t)coeff_ptr[4], (int16_t)coeff_ptr[5],
@@ -28,7 +28,7 @@
static INLINE void store_coefficients(__m128i coeff_vals,
tran_low_t* coeff_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
__m128i one = _mm_set1_epi16(1);
__m128i coeff_vals_hi = _mm_mulhi_epi16(coeff_vals, one);
__m128i coeff_vals_lo = _mm_mullo_epi16(coeff_vals, one);
diff --git a/aom_dsp/x86/quantize_ssse3_x86_64.asm b/aom_dsp/x86/quantize_ssse3_x86_64.asm
index f97ee57..4503370 100644
--- a/aom_dsp/x86/quantize_ssse3_x86_64.asm
+++ b/aom_dsp/x86/quantize_ssse3_x86_64.asm
@@ -53,7 +53,7 @@
%endif
pxor m5, m5 ; m5 = dedicated zero
DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea coeffq, [ coeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -66,7 +66,7 @@
neg ncoeffq
; get DC and first 15 AC coeffs
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; coeff stored as 32bit numbers & require 16bit numbers
mova m9, [ coeffq+ncoeffq*4+ 0]
packssdw m9, [ coeffq+ncoeffq*4+16]
@@ -96,7 +96,7 @@
psignw m13, m10 ; m13 = reinsert sign
pand m8, m7
pand m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
mova m11, m8
mova m6, m8
@@ -131,7 +131,7 @@
psignw m8, m9
psignw m13, m10
%endif
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
mova m11, m8
mova m6, m8
@@ -166,7 +166,7 @@
jz .accumulate_eob
.ac_only_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; pack coeff from 32bit to 16bit array
mova m9, [ coeffq+ncoeffq*4+ 0]
packssdw m9, [ coeffq+ncoeffq*4+16]
@@ -198,7 +198,7 @@
psignw m13, m10 ; m13 = reinsert sign
pand m14, m7
pand m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pxor m11, m11
mova m11, m14
@@ -233,7 +233,7 @@
psignw m14, m9
psignw m13, m10
%endif
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
mova m11, m14
mova m6, m14
@@ -271,7 +271,7 @@
%ifidn %1, b_32x32
jmp .accumulate_eob
.skip_iter:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4+ 0], m5
mova [qcoeffq+ncoeffq*4+16], m5
mova [qcoeffq+ncoeffq*4+32], m5
@@ -310,7 +310,7 @@
mov r2, qcoeffmp
mov r3, eobmp
DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
%else
@@ -320,7 +320,7 @@
neg ncoeffq
pxor m7, m7
.blank_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [dqcoeffq+ncoeffq*4+ 0], m7
mova [dqcoeffq+ncoeffq*4+16], m7
mova [dqcoeffq+ncoeffq*4+32], m7
diff --git a/aom_mem/aom_mem.c b/aom_mem/aom_mem.c
index 6869352..cbfc704 100644
--- a/aom_mem/aom_mem.c
+++ b/aom_mem/aom_mem.c
@@ -87,11 +87,11 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void *aom_memset16(void *dest, int val, size_t length) {
size_t i;
uint16_t *dest16 = (uint16_t *)dest;
for (i = 0; i < length; i++) *dest16++ = val;
return dest;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_mem/aom_mem.h b/aom_mem/aom_mem.h
index 32744eb..ae5fe3a 100644
--- a/aom_mem/aom_mem.h
+++ b/aom_mem/aom_mem.h
@@ -30,7 +30,7 @@
void *aom_realloc(void *memblk, size_t size);
void aom_free(void *memblk);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void *aom_memset16(void *dest, int val, size_t length);
#endif
diff --git a/aom_ports/mem.h b/aom_ports/mem.h
index 08337a7..c06c50c 100644
--- a/aom_ports/mem.h
+++ b/aom_ports/mem.h
@@ -44,9 +44,9 @@
#define ALIGN_POWER_OF_TWO(value, n) \
(((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)x) << 1))
#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)x) >> 1))
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // VPX_PORTS_MEM_H_
diff --git a/aom_scale/aom_scale_rtcd.pl b/aom_scale/aom_scale_rtcd.pl
index 856abcd..6a3c65c 100644
--- a/aom_scale/aom_scale_rtcd.pl
+++ b/aom_scale/aom_scale_rtcd.pl
@@ -22,7 +22,7 @@
add_proto qw/void aom_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
-if (aom_config("CONFIG_VP10") eq "yes") {
+if (aom_config("CONFIG_AV1") eq "yes") {
add_proto qw/void aom_extend_frame_borders/, "struct yv12_buffer_config *ybf";
specialize qw/aom_extend_frame_borders dspr2/;
diff --git a/aom_scale/generic/yv12config.c b/aom_scale/generic/yv12config.c
index b789722..554965b 100644
--- a/aom_scale/generic/yv12config.c
+++ b/aom_scale/generic/yv12config.c
@@ -114,7 +114,7 @@
return -2;
}
-#if CONFIG_VP10
+#if CONFIG_AV1
// TODO(jkoleszar): Maybe replace this with struct aom_image
int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
@@ -136,7 +136,7 @@
int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
int border, int byte_alignment,
@@ -166,21 +166,21 @@
const uint64_t alpha_plane_size =
(alpha_height + 2 * alpha_border_h) * (uint64_t)alpha_stride +
byte_alignment;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint64_t frame_size =
(1 + use_highbitdepth) *
(yplane_size + 2 * uvplane_size + alpha_plane_size);
#else
const uint64_t frame_size =
yplane_size + 2 * uvplane_size + alpha_plane_size;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint64_t frame_size =
(1 + use_highbitdepth) * (yplane_size + 2 * uvplane_size);
#else
const uint64_t frame_size = yplane_size + 2 * uvplane_size;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_ALPHA
uint8_t *buf = NULL;
@@ -251,7 +251,7 @@
ybf->subsampling_y = ss_y;
buf = ybf->buffer_alloc;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (use_highbitdepth) {
// Store uint16 addresses when using 16bit framebuffers
buf = CONVERT_TO_BYTEPTR(ybf->buffer_alloc);
@@ -259,7 +259,7 @@
} else {
ybf->flags = 0;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
ybf->y_buffer = (uint8_t *)yv12_align_addr(
buf + (border * y_stride) + border, aom_byte_align);
@@ -288,14 +288,14 @@
int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
int border, int byte_alignment) {
if (ybf) {
aom_free_frame_buffer(ybf);
return aom_realloc_frame_buffer(ybf, width, height, ss_x, ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
use_highbitdepth,
#endif
border, byte_alignment, NULL, NULL, NULL);
diff --git a/aom_scale/generic/yv12extend.c b/aom_scale/generic/yv12extend.c
index 2dacbed..017f6d1 100644
--- a/aom_scale/generic/yv12extend.c
+++ b/aom_scale/generic/yv12extend.c
@@ -16,7 +16,7 @@
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#include "av1/common/common.h"
#endif
@@ -60,7 +60,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void extend_plane_high(uint8_t *const src8, int src_stride, int width,
int height, int extend_top, int extend_left,
int extend_bottom, int extend_right) {
@@ -112,7 +112,7 @@
assert(ybf->y_height - ybf->y_crop_height >= 0);
assert(ybf->y_width - ybf->y_crop_width >= 0);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
ybf->y_crop_height, ybf->border, ybf->border,
@@ -147,7 +147,7 @@
uv_border + ybf->uv_width - ybf->uv_crop_width);
}
-#if CONFIG_VP10
+#if CONFIG_AV1
static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) {
const int c_w = ybf->uv_crop_width;
const int c_h = ybf->uv_crop_height;
@@ -163,7 +163,7 @@
assert(ybf->y_height - ybf->y_crop_height >= 0);
assert(ybf->y_width - ybf->y_crop_width >= 0);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
ybf->y_crop_height, ext_size, ext_size,
@@ -197,14 +197,14 @@
extend_frame(ybf, inner_bw);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) {
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
memcpy(dst, src, num * sizeof(uint16_t));
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
-#endif // CONFIG_VP10
+#endif // CONFIG_AOM_HIGHBITDEPTH
+#endif // CONFIG_AV1
// Copies the source image into the destination image and updates the
// destination's UMV borders.
@@ -223,7 +223,7 @@
assert(src_ybc->y_height == dst_ybc->y_height);
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
assert(dst_ybc->flags & YV12_FLAG_HIGHBITDEPTH);
for (row = 0; row < src_ybc->y_height; ++row) {
@@ -290,7 +290,7 @@
const uint8_t *src = src_ybc->y_buffer;
uint8_t *dst = dst_ybc->y_buffer;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
diff --git a/aom_scale/yv12config.h b/aom_scale/yv12config.h
index 3b0e044..9a2fce3 100644
--- a/aom_scale/yv12config.h
+++ b/aom_scale/yv12config.h
@@ -75,7 +75,7 @@
int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
int border, int byte_alignment);
@@ -89,7 +89,7 @@
// on failure.
int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
int border, int byte_alignment,
diff --git a/aomdec.c b/aomdec.c
index 2824189..9230b3c 100644
--- a/aomdec.c
+++ b/aomdec.c
@@ -29,7 +29,7 @@
#include "aom_ports/mem_ops.h"
#include "aom_ports/aom_timer.h"
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
#include "aom/vp8dx.h"
#endif
@@ -89,7 +89,7 @@
ARG_DEF(NULL, "frame-buffers", 1, "Number of frame buffers to use");
static const arg_def_t md5arg =
ARG_DEF(NULL, "md5", 0, "Compute the MD5 sum of the decoded frame");
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const arg_def_t outbitdeptharg =
ARG_DEF(NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames");
#endif
@@ -100,7 +100,7 @@
&progressarg, &limitarg, &skiparg, &postprocarg, &summaryarg, &outputfile,
&threadsarg, &frameparallelarg, &verbosearg, &scalearg, &fb_arg,
&md5arg, &error_concealment, &continuearg,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
&outbitdeptharg,
#endif
NULL
@@ -110,7 +110,7 @@
#if CONFIG_LIBYUV
static INLINE int libyuv_scale(aom_image_t *src, aom_image_t *dst,
FilterModeEnum mode) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->fmt == VPX_IMG_FMT_I42016) {
assert(dst->fmt == VPX_IMG_FMT_I42016);
return I420Scale_16(
@@ -253,7 +253,7 @@
static void write_image_file(const aom_image_t *img, const int planes[3],
FILE *file) {
int i, y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int bytes_per_sample = ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
#else
const int bytes_per_sample = 1;
@@ -454,7 +454,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int img_shifted_realloc_required(const aom_image_t *img,
const aom_image_t *shifted,
aom_img_fmt_t required_fmt) {
@@ -487,14 +487,14 @@
int opt_yv12 = 0;
int opt_i420 = 0;
aom_codec_dec_cfg_t cfg = { 0, 0, 0 };
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
unsigned int output_bit_depth = 0;
#endif
int frames_corrupted = 0;
int dec_flags = 0;
int do_scale = 0;
aom_image_t *scaled_img = NULL;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
aom_image_t *img_shifted = NULL;
#endif
int frame_avail, got_data, flush_decoder = 0;
@@ -561,7 +561,7 @@
summary = 1;
else if (arg_match(&arg, &threadsarg, argi))
cfg.threads = arg_parse_uint(&arg);
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
else if (arg_match(&arg, &frameparallelarg, argi))
frame_parallel = 1;
#endif
@@ -573,7 +573,7 @@
num_external_frame_buffers = arg_parse_uint(&arg);
else if (arg_match(&arg, &continuearg, argi))
keep_going = 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
else if (arg_match(&arg, &outbitdeptharg, argi)) {
output_bit_depth = arg_parse_uint(&arg);
}
@@ -806,7 +806,7 @@
#endif
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Default to codec bit depth if output bit depth not set
if (!output_bit_depth && single_file && !do_md5) {
output_bit_depth = img->bit_depth;
@@ -941,7 +941,7 @@
if (input.aom_input_ctx->file_type != FILE_TYPE_WEBM) free(buf);
if (scaled_img) aom_img_free(scaled_img);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (img_shifted) aom_img_free(img_shifted);
#endif
diff --git a/aomenc.c b/aomenc.c
index c6e2ec0..552126c 100644
--- a/aomenc.c
+++ b/aomenc.c
@@ -33,10 +33,10 @@
#include "./ivfenc.h"
#include "./tools_common.h"
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
#include "aom/vp8cx.h"
#endif
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
#include "aom/vp8dx.h"
#endif
@@ -195,7 +195,7 @@
ARG_DEF("y", "disable-warning-prompt", 0,
"Display warnings, but do not prompt user to continue.");
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const arg_def_t test16bitinternalarg = ARG_DEF(
NULL, "test-16bit-internal", 0, "Force use of 16 bit internal buffer");
#endif
@@ -268,7 +268,7 @@
&timebase,
&framerate,
&error_resilient,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
&test16bitinternalarg,
#endif
&lag_in_frames,
@@ -357,7 +357,7 @@
static const arg_def_t max_intra_rate_pct =
ARG_DEF(NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)");
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
static const arg_def_t cpu_used_vp9 =
ARG_DEF(NULL, "cpu-used", 1, "CPU Used (-8..8)");
static const arg_def_t tile_cols =
@@ -411,7 +411,7 @@
ARG_DEF_ENUM(NULL, "color-space", 1, "The color space of input content:",
color_space_enum);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const struct arg_enum_list bitdepth_enum[] = {
{ "8", VPX_BITS_8 }, { "10", VPX_BITS_10 }, { "12", VPX_BITS_12 }, { NULL, 0 }
};
@@ -434,9 +434,9 @@
NULL, "tune-content", 1, "Tune content type", tune_content_enum);
#endif
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
/* clang-format off */
-static const arg_def_t *vp10_args[] = {
+static const arg_def_t *av1_args[] = {
&cpu_used_vp9, &auto_altref, &sharpness,
&static_thresh, &tile_cols, &tile_rows,
&arnr_maxframes, &arnr_strength, &arnr_type,
@@ -449,7 +449,7 @@
&noise_sens, &tune_content, &input_color_space,
&min_gf_interval, &max_gf_interval, NULL
};
-static const int vp10_arg_ctrl_map[] = {
+static const int av1_arg_ctrl_map[] = {
VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
VP9E_SET_TILE_COLUMNS, VP9E_SET_TILE_ROWS,
@@ -490,9 +490,9 @@
arg_show_usage(stderr, rc_twopass_args);
fprintf(stderr, "\nKeyframe Placement Options:\n");
arg_show_usage(stderr, kf_args);
-#if CONFIG_VP10_ENCODER
- fprintf(stderr, "\nVP10 Specific Options:\n");
- arg_show_usage(stderr, vp10_args);
+#if CONFIG_AV1_ENCODER
+ fprintf(stderr, "\nAV1 Specific Options:\n");
+ arg_show_usage(stderr, av1_args);
#endif
fprintf(stderr,
"\nStream timebase (--timebase):\n"
@@ -514,7 +514,7 @@
#define mmin(a, b) ((a) < (b) ? (a) : (b))
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void find_mismatch_high(const aom_image_t *const img1,
const aom_image_t *const img2, int yloc[4],
int uloc[4], int vloc[4]) {
@@ -711,7 +711,7 @@
match &= (img1->fmt == img2->fmt);
match &= (img1->d_w == img2->d_w);
match &= (img1->d_h == img2->d_h);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (img1->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
l_w *= 2;
c_w *= 2;
@@ -737,8 +737,8 @@
}
#define NELEMENTS(x) (sizeof(x) / sizeof(x[0]))
-#if CONFIG_VP10_ENCODER
-#define ARG_CTRL_CNT_MAX NELEMENTS(vp10_arg_ctrl_map)
+#if CONFIG_AV1_ENCODER
+#define ARG_CTRL_CNT_MAX NELEMENTS(av1_arg_ctrl_map)
#endif
#if !CONFIG_WEBM_IO
@@ -761,7 +761,7 @@
int arg_ctrl_cnt;
int write_webm;
int have_kf_max_dist;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// whether to use 16bit internal buffers
int use_16bit_internal;
#endif
@@ -898,7 +898,7 @@
}
/* Validate global config */
if (global->passes == 0) {
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
// Make default VP9 passes = 2 until there is a better quality 1-pass
// encoder
if (global->codec != NULL && global->codec->name != NULL)
@@ -1032,18 +1032,18 @@
static const int *ctrl_args_map = NULL;
struct stream_config *config = &stream->config;
int eos_mark_found = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int test_16bit_internal = 0;
#endif
// Handle codec specific options
if (0) {
-#if CONFIG_VP10_ENCODER
- } else if (strcmp(global->codec->name, "vp10") == 0) {
+#if CONFIG_AV1_ENCODER
+ } else if (strcmp(global->codec->name, "av1") == 0) {
// TODO(jingning): Reuse VP9 specific encoder configuration parameters.
- // Consider to expand this set for VP10 encoder control.
- ctrl_args = vp10_args;
- ctrl_args_map = vp10_arg_ctrl_map;
+ // Consider to expand this set for AV1 encoder control.
+ ctrl_args = av1_args;
+ ctrl_args_map = av1_arg_ctrl_map;
#endif
}
@@ -1085,7 +1085,7 @@
config->cfg.g_w = arg_parse_uint(&arg);
} else if (arg_match(&arg, &height, argi)) {
config->cfg.g_h = arg_parse_uint(&arg);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else if (arg_match(&arg, &bitdeptharg, argi)) {
config->cfg.g_bit_depth = arg_parse_enum_or_int(&arg);
} else if (arg_match(&arg, &inbitdeptharg, argi)) {
@@ -1158,10 +1158,10 @@
config->have_kf_max_dist = 1;
} else if (arg_match(&arg, &kf_disabled, argi)) {
config->cfg.kf_mode = VPX_KF_DISABLED;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else if (arg_match(&arg, &test16bitinternalarg, argi)) {
if (strcmp(global->codec->name, "vp9") == 0 ||
- strcmp(global->codec->name, "vp10") == 0) {
+ strcmp(global->codec->name, "av1") == 0) {
test_16bit_internal = 1;
}
#endif
@@ -1192,9 +1192,9 @@
if (!match) argj++;
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (strcmp(global->codec->name, "vp9") == 0 ||
- strcmp(global->codec->name, "vp10") == 0) {
+ strcmp(global->codec->name, "av1") == 0) {
config->use_16bit_internal =
test_16bit_internal | (config->cfg.g_profile > 1);
}
@@ -1461,7 +1461,7 @@
flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
flags |= stream->config.use_16bit_internal ? VPX_CODEC_USE_HIGHBITDEPTH : 0;
#endif
@@ -1506,7 +1506,7 @@
cfg->g_timebase.num / global->framerate.num;
/* Scale if necessary */
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (img) {
if ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) &&
(img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
@@ -1721,7 +1721,7 @@
enc_img = ref_enc.img;
aom_codec_control(&stream->decoder, VP9_GET_REFERENCE, &ref_dec);
dec_img = ref_dec.img;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if ((enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) !=
(dec_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH)) {
if (enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
@@ -1741,7 +1741,7 @@
if (!compare_img(&enc_img, &dec_img)) {
int y[4], u[4], v[4];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
find_mismatch_high(&enc_img, &dec_img, y, u, v);
} else {
@@ -1787,7 +1787,7 @@
int main(int argc, const char **argv_) {
int pass;
aom_image_t raw;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
aom_image_t raw_shift;
int allocated_raw_shift = 0;
int use_16bit_internal = 0;
@@ -1857,7 +1857,7 @@
if (!input.filename) usage_exit();
/* Decide if other chroma subsamplings than 4:2:0 are supported */
- if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == VP10_FOURCC)
+ if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == AV1_FOURCC)
input.only_i420 = 0;
for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++) {
@@ -1964,9 +1964,9 @@
open_output_file(stream, &global, &input.pixel_aspect_ratio));
FOREACH_STREAM(initialize_encoder(stream, &global));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (strcmp(global.codec->name, "vp9") == 0 ||
- strcmp(global.codec->name, "vp10") == 0) {
+ strcmp(global.codec->name, "av1") == 0) {
// Check to see if at least one stream uses 16 bit internal.
// Currently assume that the bit_depths for all streams using
// highbitdepth are the same.
@@ -2018,7 +2018,7 @@
frame_avail = 0;
if (frames_in > global.skip_frames) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
aom_image_t *frame_to_encode;
if (input_shift || (use_16bit_internal && input.bit_depth == 8)) {
assert(use_16bit_internal);
@@ -2174,7 +2174,7 @@
});
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (allocated_raw_shift) aom_img_free(&raw_shift);
#endif
aom_img_free(&raw);
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
index 99cbe46..020095f 100644
--- a/av1/av1_common.mk
+++ b/av1/av1_common.mk
@@ -8,91 +8,91 @@
## be found in the AUTHORS file in the root of the source tree.
##
-VP10_COMMON_SRCS-yes += av1_common.mk
-VP10_COMMON_SRCS-yes += av1_iface_common.h
-VP10_COMMON_SRCS-yes += common/alloccommon.c
-VP10_COMMON_SRCS-yes += common/blockd.c
-VP10_COMMON_SRCS-yes += common/debugmodes.c
-VP10_COMMON_SRCS-yes += common/entropy.c
-VP10_COMMON_SRCS-yes += common/entropymode.c
-VP10_COMMON_SRCS-yes += common/entropymv.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.h
-VP10_COMMON_SRCS-yes += common/alloccommon.h
-VP10_COMMON_SRCS-yes += common/blockd.h
-VP10_COMMON_SRCS-yes += common/common.h
-VP10_COMMON_SRCS-yes += common/entropy.h
-VP10_COMMON_SRCS-yes += common/entropymode.h
-VP10_COMMON_SRCS-yes += common/entropymv.h
-VP10_COMMON_SRCS-yes += common/enums.h
-VP10_COMMON_SRCS-yes += common/filter.h
-VP10_COMMON_SRCS-yes += common/filter.c
-VP10_COMMON_SRCS-yes += common/idct.h
-VP10_COMMON_SRCS-yes += common/idct.c
-VP10_COMMON_SRCS-yes += common/av1_inv_txfm.h
-VP10_COMMON_SRCS-yes += common/av1_inv_txfm.c
-VP10_COMMON_SRCS-yes += common/loopfilter.h
-VP10_COMMON_SRCS-yes += common/thread_common.h
-VP10_COMMON_SRCS-yes += common/mv.h
-VP10_COMMON_SRCS-yes += common/onyxc_int.h
-VP10_COMMON_SRCS-yes += common/pred_common.h
-VP10_COMMON_SRCS-yes += common/pred_common.c
-VP10_COMMON_SRCS-yes += common/quant_common.h
-VP10_COMMON_SRCS-yes += common/reconinter.h
-VP10_COMMON_SRCS-yes += common/reconintra.h
-VP10_COMMON_SRCS-yes += common/av1_rtcd.c
-VP10_COMMON_SRCS-yes += common/av1_rtcd_defs.pl
-VP10_COMMON_SRCS-yes += common/scale.h
-VP10_COMMON_SRCS-yes += common/scale.c
-VP10_COMMON_SRCS-yes += common/seg_common.h
-VP10_COMMON_SRCS-yes += common/seg_common.c
-VP10_COMMON_SRCS-yes += common/tile_common.h
-VP10_COMMON_SRCS-yes += common/tile_common.c
-VP10_COMMON_SRCS-yes += common/loopfilter.c
-VP10_COMMON_SRCS-yes += common/thread_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.h
-VP10_COMMON_SRCS-yes += common/quant_common.c
-VP10_COMMON_SRCS-yes += common/reconinter.c
-VP10_COMMON_SRCS-yes += common/reconintra.c
-VP10_COMMON_SRCS-yes += common/common_data.h
-VP10_COMMON_SRCS-yes += common/scan.c
-VP10_COMMON_SRCS-yes += common/scan.h
-VP10_COMMON_SRCS-yes += common/av1_fwd_txfm.h
-VP10_COMMON_SRCS-yes += common/av1_fwd_txfm.c
-VP10_COMMON_SRCS-yes += common/clpf.c
-VP10_COMMON_SRCS-yes += common/clpf.h
+AV1_COMMON_SRCS-yes += av1_common.mk
+AV1_COMMON_SRCS-yes += av1_iface_common.h
+AV1_COMMON_SRCS-yes += common/alloccommon.c
+AV1_COMMON_SRCS-yes += common/blockd.c
+AV1_COMMON_SRCS-yes += common/debugmodes.c
+AV1_COMMON_SRCS-yes += common/entropy.c
+AV1_COMMON_SRCS-yes += common/entropymode.c
+AV1_COMMON_SRCS-yes += common/entropymv.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.h
+AV1_COMMON_SRCS-yes += common/alloccommon.h
+AV1_COMMON_SRCS-yes += common/blockd.h
+AV1_COMMON_SRCS-yes += common/common.h
+AV1_COMMON_SRCS-yes += common/entropy.h
+AV1_COMMON_SRCS-yes += common/entropymode.h
+AV1_COMMON_SRCS-yes += common/entropymv.h
+AV1_COMMON_SRCS-yes += common/enums.h
+AV1_COMMON_SRCS-yes += common/filter.h
+AV1_COMMON_SRCS-yes += common/filter.c
+AV1_COMMON_SRCS-yes += common/idct.h
+AV1_COMMON_SRCS-yes += common/idct.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.c
+AV1_COMMON_SRCS-yes += common/loopfilter.h
+AV1_COMMON_SRCS-yes += common/thread_common.h
+AV1_COMMON_SRCS-yes += common/mv.h
+AV1_COMMON_SRCS-yes += common/onyxc_int.h
+AV1_COMMON_SRCS-yes += common/pred_common.h
+AV1_COMMON_SRCS-yes += common/pred_common.c
+AV1_COMMON_SRCS-yes += common/quant_common.h
+AV1_COMMON_SRCS-yes += common/reconinter.h
+AV1_COMMON_SRCS-yes += common/reconintra.h
+AV1_COMMON_SRCS-yes += common/av1_rtcd.c
+AV1_COMMON_SRCS-yes += common/av1_rtcd_defs.pl
+AV1_COMMON_SRCS-yes += common/scale.h
+AV1_COMMON_SRCS-yes += common/scale.c
+AV1_COMMON_SRCS-yes += common/seg_common.h
+AV1_COMMON_SRCS-yes += common/seg_common.c
+AV1_COMMON_SRCS-yes += common/tile_common.h
+AV1_COMMON_SRCS-yes += common/tile_common.c
+AV1_COMMON_SRCS-yes += common/loopfilter.c
+AV1_COMMON_SRCS-yes += common/thread_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.h
+AV1_COMMON_SRCS-yes += common/quant_common.c
+AV1_COMMON_SRCS-yes += common/reconinter.c
+AV1_COMMON_SRCS-yes += common/reconintra.c
+AV1_COMMON_SRCS-yes += common/common_data.h
+AV1_COMMON_SRCS-yes += common/scan.c
+AV1_COMMON_SRCS-yes += common/scan.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.c
+AV1_COMMON_SRCS-yes += common/clpf.c
+AV1_COMMON_SRCS-yes += common/clpf.h
ifeq ($(CONFIG_DERING),yes)
-VP10_COMMON_SRCS-yes += common/od_dering.c
-VP10_COMMON_SRCS-yes += common/od_dering.h
-VP10_COMMON_SRCS-yes += common/dering.c
-VP10_COMMON_SRCS-yes += common/dering.h
+AV1_COMMON_SRCS-yes += common/od_dering.c
+AV1_COMMON_SRCS-yes += common/od_dering.h
+AV1_COMMON_SRCS-yes += common/dering.c
+AV1_COMMON_SRCS-yes += common/dering.h
endif
-VP10_COMMON_SRCS-yes += common/odintrin.c
-VP10_COMMON_SRCS-yes += common/odintrin.h
+AV1_COMMON_SRCS-yes += common/odintrin.c
+AV1_COMMON_SRCS-yes += common/odintrin.h
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans4_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans8_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans16_dspr2.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans4_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans8_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans16_dspr2.c
endif
# common (msa)
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
endif
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h
$(eval $(call rtcd_h_template,av1_rtcd,av1/common/av1_rtcd_defs.pl))
diff --git a/av1/av1_cx.mk b/av1/av1_cx.mk
index 22448ce..065bef4 100644
--- a/av1/av1_cx.mk
+++ b/av1/av1_cx.mk
@@ -8,112 +8,112 @@
## be found in the AUTHORS file in the root of the source tree.
##
-VP10_CX_EXPORTS += exports_enc
+AV1_CX_EXPORTS += exports_enc
-VP10_CX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_CX_SRCS-no += $(VP10_COMMON_SRCS-no)
-VP10_CX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_CX_SRCS_REMOVE-no += $(VP10_COMMON_SRCS_REMOVE-no)
+AV1_CX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_CX_SRCS-no += $(AV1_COMMON_SRCS-no)
+AV1_CX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_CX_SRCS_REMOVE-no += $(AV1_COMMON_SRCS_REMOVE-no)
-VP10_CX_SRCS-yes += av1_cx_iface.c
+AV1_CX_SRCS-yes += av1_cx_iface.c
-VP10_CX_SRCS-yes += encoder/bitstream.c
-VP10_CX_SRCS-yes += encoder/context_tree.c
-VP10_CX_SRCS-yes += encoder/context_tree.h
-VP10_CX_SRCS-yes += encoder/cost.h
-VP10_CX_SRCS-yes += encoder/cost.c
-VP10_CX_SRCS-yes += encoder/dct.c
-VP10_CX_SRCS-yes += encoder/encodeframe.c
-VP10_CX_SRCS-yes += encoder/encodeframe.h
-VP10_CX_SRCS-yes += encoder/encodemb.c
-VP10_CX_SRCS-yes += encoder/encodemv.c
-VP10_CX_SRCS-yes += encoder/ethread.h
-VP10_CX_SRCS-yes += encoder/ethread.c
-VP10_CX_SRCS-yes += encoder/extend.c
-VP10_CX_SRCS-yes += encoder/firstpass.c
-VP10_CX_SRCS-yes += encoder/block.h
-VP10_CX_SRCS-yes += encoder/bitstream.h
-VP10_CX_SRCS-yes += encoder/encodemb.h
-VP10_CX_SRCS-yes += encoder/encodemv.h
-VP10_CX_SRCS-yes += encoder/extend.h
-VP10_CX_SRCS-yes += encoder/firstpass.h
-VP10_CX_SRCS-yes += encoder/lookahead.c
-VP10_CX_SRCS-yes += encoder/lookahead.h
-VP10_CX_SRCS-yes += encoder/mcomp.h
-VP10_CX_SRCS-yes += encoder/encoder.h
-VP10_CX_SRCS-yes += encoder/quantize.h
-VP10_CX_SRCS-yes += encoder/ratectrl.h
-VP10_CX_SRCS-yes += encoder/rd.h
-VP10_CX_SRCS-yes += encoder/rdopt.h
-VP10_CX_SRCS-yes += encoder/tokenize.h
-VP10_CX_SRCS-yes += encoder/treewriter.h
-VP10_CX_SRCS-yes += encoder/mcomp.c
-VP10_CX_SRCS-yes += encoder/encoder.c
-VP10_CX_SRCS-yes += encoder/picklpf.c
-VP10_CX_SRCS-yes += encoder/picklpf.h
-VP10_CX_SRCS-yes += encoder/quantize.c
-VP10_CX_SRCS-yes += encoder/ratectrl.c
-VP10_CX_SRCS-yes += encoder/rd.c
-VP10_CX_SRCS-yes += encoder/rdopt.c
-VP10_CX_SRCS-yes += encoder/segmentation.c
-VP10_CX_SRCS-yes += encoder/segmentation.h
-VP10_CX_SRCS-yes += encoder/speed_features.c
-VP10_CX_SRCS-yes += encoder/speed_features.h
-VP10_CX_SRCS-yes += encoder/subexp.c
-VP10_CX_SRCS-yes += encoder/subexp.h
-VP10_CX_SRCS-yes += encoder/resize.c
-VP10_CX_SRCS-yes += encoder/resize.h
-VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
+AV1_CX_SRCS-yes += encoder/bitstream.c
+AV1_CX_SRCS-yes += encoder/context_tree.c
+AV1_CX_SRCS-yes += encoder/context_tree.h
+AV1_CX_SRCS-yes += encoder/cost.h
+AV1_CX_SRCS-yes += encoder/cost.c
+AV1_CX_SRCS-yes += encoder/dct.c
+AV1_CX_SRCS-yes += encoder/encodeframe.c
+AV1_CX_SRCS-yes += encoder/encodeframe.h
+AV1_CX_SRCS-yes += encoder/encodemb.c
+AV1_CX_SRCS-yes += encoder/encodemv.c
+AV1_CX_SRCS-yes += encoder/ethread.h
+AV1_CX_SRCS-yes += encoder/ethread.c
+AV1_CX_SRCS-yes += encoder/extend.c
+AV1_CX_SRCS-yes += encoder/firstpass.c
+AV1_CX_SRCS-yes += encoder/block.h
+AV1_CX_SRCS-yes += encoder/bitstream.h
+AV1_CX_SRCS-yes += encoder/encodemb.h
+AV1_CX_SRCS-yes += encoder/encodemv.h
+AV1_CX_SRCS-yes += encoder/extend.h
+AV1_CX_SRCS-yes += encoder/firstpass.h
+AV1_CX_SRCS-yes += encoder/lookahead.c
+AV1_CX_SRCS-yes += encoder/lookahead.h
+AV1_CX_SRCS-yes += encoder/mcomp.h
+AV1_CX_SRCS-yes += encoder/encoder.h
+AV1_CX_SRCS-yes += encoder/quantize.h
+AV1_CX_SRCS-yes += encoder/ratectrl.h
+AV1_CX_SRCS-yes += encoder/rd.h
+AV1_CX_SRCS-yes += encoder/rdopt.h
+AV1_CX_SRCS-yes += encoder/tokenize.h
+AV1_CX_SRCS-yes += encoder/treewriter.h
+AV1_CX_SRCS-yes += encoder/mcomp.c
+AV1_CX_SRCS-yes += encoder/encoder.c
+AV1_CX_SRCS-yes += encoder/picklpf.c
+AV1_CX_SRCS-yes += encoder/picklpf.h
+AV1_CX_SRCS-yes += encoder/quantize.c
+AV1_CX_SRCS-yes += encoder/ratectrl.c
+AV1_CX_SRCS-yes += encoder/rd.c
+AV1_CX_SRCS-yes += encoder/rdopt.c
+AV1_CX_SRCS-yes += encoder/segmentation.c
+AV1_CX_SRCS-yes += encoder/segmentation.h
+AV1_CX_SRCS-yes += encoder/speed_features.c
+AV1_CX_SRCS-yes += encoder/speed_features.h
+AV1_CX_SRCS-yes += encoder/subexp.c
+AV1_CX_SRCS-yes += encoder/subexp.h
+AV1_CX_SRCS-yes += encoder/resize.c
+AV1_CX_SRCS-yes += encoder/resize.h
+AV1_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
-VP10_CX_SRCS-yes += encoder/tokenize.c
-VP10_CX_SRCS-yes += encoder/treewriter.c
-VP10_CX_SRCS-yes += encoder/aq_variance.c
-VP10_CX_SRCS-yes += encoder/aq_variance.h
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
-VP10_CX_SRCS-yes += encoder/aq_complexity.c
-VP10_CX_SRCS-yes += encoder/aq_complexity.h
-VP10_CX_SRCS-yes += encoder/skin_detection.c
-VP10_CX_SRCS-yes += encoder/skin_detection.h
-VP10_CX_SRCS-yes += encoder/temporal_filter.c
-VP10_CX_SRCS-yes += encoder/temporal_filter.h
-VP10_CX_SRCS-yes += encoder/mbgraph.c
-VP10_CX_SRCS-yes += encoder/mbgraph.h
-VP10_CX_SRCS-yes += encoder/pickdering.c
+AV1_CX_SRCS-yes += encoder/tokenize.c
+AV1_CX_SRCS-yes += encoder/treewriter.c
+AV1_CX_SRCS-yes += encoder/aq_variance.c
+AV1_CX_SRCS-yes += encoder/aq_variance.h
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
+AV1_CX_SRCS-yes += encoder/aq_complexity.c
+AV1_CX_SRCS-yes += encoder/aq_complexity.h
+AV1_CX_SRCS-yes += encoder/skin_detection.c
+AV1_CX_SRCS-yes += encoder/skin_detection.h
+AV1_CX_SRCS-yes += encoder/temporal_filter.c
+AV1_CX_SRCS-yes += encoder/temporal_filter.h
+AV1_CX_SRCS-yes += encoder/mbgraph.c
+AV1_CX_SRCS-yes += encoder/mbgraph.h
+AV1_CX_SRCS-yes += encoder/pickdering.c
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
endif
ifeq ($(CONFIG_USE_X86INC),yes)
-VP10_CX_SRCS-$(HAVE_MMX) += encoder/x86/dct_mmx.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
+AV1_CX_SRCS-$(HAVE_MMX) += encoder/x86/dct_mmx.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
endif
ifeq ($(ARCH_X86_64),yes)
ifeq ($(CONFIG_USE_X86INC),yes)
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
endif
endif
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.c
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
-VP10_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
+AV1_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
endif
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
-VP10_CX_SRCS-yes := $(filter-out $(VP10_CX_SRCS_REMOVE-yes),$(VP10_CX_SRCS-yes))
+AV1_CX_SRCS-yes := $(filter-out $(AV1_CX_SRCS_REMOVE-yes),$(AV1_CX_SRCS-yes))
diff --git a/av1/av1_cx_iface.c b/av1/av1_cx_iface.c
index 162ee06..9215237 100644
--- a/av1/av1_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -22,7 +22,7 @@
#include "av1/encoder/firstpass.h"
#include "av1/av1_iface_common.h"
-struct vp10_extracfg {
+struct av1_extracfg {
int cpu_used; // available cpu percentage in 1/16
unsigned int enable_auto_alt_ref;
unsigned int noise_sensitivity;
@@ -56,7 +56,7 @@
int render_height;
};
-static struct vp10_extracfg default_extra_cfg = {
+static struct av1_extracfg default_extra_cfg = {
0, // cpu_used
1, // enable_auto_alt_ref
0, // noise_sensitivity
@@ -93,9 +93,9 @@
struct aom_codec_alg_priv {
aom_codec_priv_t base;
aom_codec_enc_cfg_t cfg;
- struct vp10_extracfg extra_cfg;
- VP10EncoderConfig oxcf;
- VP10_COMP *cpi;
+ struct av1_extracfg extra_cfg;
+ AV1EncoderConfig oxcf;
+ AV1_COMP *cpi;
unsigned char *cx_data;
size_t cx_data_sz;
unsigned char *pending_cx_data;
@@ -115,7 +115,7 @@
BufferPool *buffer_pool;
};
-static VPX_REFFRAME ref_frame_to_vp10_reframe(aom_ref_frame_type_t frame) {
+static VPX_REFFRAME ref_frame_to_av1_reframe(aom_ref_frame_type_t frame) {
switch (frame) {
case VP8_LAST_FRAME: return VPX_LAST_FLAG;
case VP8_GOLD_FRAME: return VPX_GOLD_FLAG;
@@ -165,7 +165,7 @@
static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
const aom_codec_enc_cfg_t *cfg,
- const struct vp10_extracfg *extra_cfg) {
+ const struct av1_extracfg *extra_cfg) {
RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available
RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available
RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
@@ -204,11 +204,11 @@
RANGE_CHECK(cfg, rc_scaled_height, 0, cfg->g_h);
}
- // Spatial/temporal scalability are not yet supported in VP10.
+ // Spatial/temporal scalability are not yet supported in AV1.
// Only accept the default value for range checking.
RANGE_CHECK(cfg, ss_number_layers, 1, 1);
RANGE_CHECK(cfg, ts_number_layers, 1, 1);
- // VP10 does not support a lower bound on the keyframe interval in
+ // AV1 does not support a lower bound on the keyframe interval in
// automatic keyframe placement mode.
if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
cfg->kf_min_dist > 0)
@@ -229,9 +229,9 @@
RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
RANGE_CHECK(extra_cfg, content, VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
- // TODO(yaowu): remove this when ssim tuning is implemented for vp10
+ // TODO(yaowu): remove this when ssim tuning is implemented for av1
if (extra_cfg->tuning == VPX_TUNE_SSIM)
- ERROR("Option --tune=ssim is not currently supported in VP10.");
+ ERROR("Option --tune=ssim is not currently supported in AV1.");
if (cfg->g_pass == VPX_RC_LAST_PASS) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
@@ -254,7 +254,7 @@
ERROR("rc_twopass_stats_in missing EOS stats packet");
}
-#if !CONFIG_VPX_HIGHBITDEPTH
+#if !CONFIG_AOM_HIGHBITDEPTH
if (cfg->g_profile > (unsigned int)PROFILE_1) {
ERROR("Profile > 1 not supported in this build configuration");
}
@@ -330,8 +330,8 @@
}
static aom_codec_err_t set_encoder_config(
- VP10EncoderConfig *oxcf, const aom_codec_enc_cfg_t *cfg,
- const struct vp10_extracfg *extra_cfg) {
+ AV1EncoderConfig *oxcf, const aom_codec_enc_cfg_t *cfg,
+ const struct av1_extracfg *extra_cfg) {
const int is_vbr = cfg->rc_end_usage == VPX_VBR;
oxcf->profile = cfg->g_profile;
oxcf->max_threads = (int)cfg->g_threads;
@@ -362,10 +362,10 @@
oxcf->gf_cbr_boost_pct = extra_cfg->gf_cbr_boost_pct;
oxcf->best_allowed_q =
- extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer);
+ extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_min_quantizer);
oxcf->worst_allowed_q =
- extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer);
- oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
+ extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_max_quantizer);
+ oxcf->cq_level = av1_quantizer_to_qindex(extra_cfg->cq_level);
oxcf->fixed_q = -1;
#if CONFIG_AOM_QM
@@ -438,7 +438,7 @@
oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
/*
- printf("Current VP10 Settings: \n");
+ printf("Current AV1 Settings: \n");
printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
printf("sharpness: %d\n", oxcf->sharpness);
@@ -500,7 +500,7 @@
set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
// On profile change, request a key frame
force_key |= ctx->cpi->common.profile != ctx->oxcf.profile;
- vp10_change_config(ctx->cpi, &ctx->oxcf);
+ av1_change_config(ctx->cpi, &ctx->oxcf);
}
if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
@@ -512,7 +512,7 @@
va_list args) {
int *const arg = va_arg(args, int *);
if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
- *arg = vp10_get_quantizer(ctx->cpi);
+ *arg = av1_get_quantizer(ctx->cpi);
return VPX_CODEC_OK;
}
@@ -520,80 +520,80 @@
va_list args) {
int *const arg = va_arg(args, int *);
if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
- *arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi));
+ *arg = av1_qindex_to_quantizer(av1_get_quantizer(ctx->cpi));
return VPX_CODEC_OK;
}
static aom_codec_err_t update_extra_cfg(aom_codec_alg_priv_t *ctx,
- const struct vp10_extracfg *extra_cfg) {
+ const struct av1_extracfg *extra_cfg) {
const aom_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg);
if (res == VPX_CODEC_OK) {
ctx->extra_cfg = *extra_cfg;
set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
- vp10_change_config(ctx->cpi, &ctx->oxcf);
+ av1_change_config(ctx->cpi, &ctx->oxcf);
}
return res;
}
static aom_codec_err_t ctrl_set_cpuused(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_enable_auto_alt_ref(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_noise_sensitivity(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.noise_sensitivity = CAST(VP9E_SET_NOISE_SENSITIVITY, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_sharpness(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.sharpness = CAST(VP8E_SET_SHARPNESS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_static_thresh(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_tile_columns(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.tile_columns = CAST(VP9E_SET_TILE_COLUMNS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_tile_rows(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.tile_rows = CAST(VP9E_SET_TILE_ROWS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_arnr_max_frames(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_arnr_strength(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args);
return update_extra_cfg(ctx, &extra_cfg);
}
@@ -607,21 +607,21 @@
static aom_codec_err_t ctrl_set_tuning(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.tuning = CAST(VP8E_SET_TUNING, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_cq_level(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
aom_codec_alg_priv_t *ctx, va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.rc_max_intra_bitrate_pct =
CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
@@ -629,7 +629,7 @@
static aom_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
aom_codec_alg_priv_t *ctx, va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.rc_max_inter_bitrate_pct =
CAST(VP8E_SET_MAX_INTER_BITRATE_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
@@ -637,14 +637,14 @@
static aom_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_lossless(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.lossless = CAST(VP9E_SET_LOSSLESS, args);
return update_extra_cfg(ctx, &extra_cfg);
}
@@ -652,21 +652,21 @@
#if CONFIG_AOM_QM
static aom_codec_err_t ctrl_set_enable_qm(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.enable_qm = CAST(VP9E_SET_ENABLE_QM, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_qm_min(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.qm_min = CAST(VP9E_SET_QM_MIN, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_qm_max(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.qm_max = CAST(VP9E_SET_QM_MAX, args);
return update_extra_cfg(ctx, &extra_cfg);
}
@@ -674,7 +674,7 @@
static aom_codec_err_t ctrl_set_frame_parallel_decoding_mode(
aom_codec_alg_priv_t *ctx, va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.frame_parallel_decoding_mode =
CAST(VP9E_SET_FRAME_PARALLEL_DECODING, args);
return update_extra_cfg(ctx, &extra_cfg);
@@ -682,28 +682,28 @@
static aom_codec_err_t ctrl_set_aq_mode(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.aq_mode = CAST(VP9E_SET_AQ_MODE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_min_gf_interval(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.min_gf_interval = CAST(VP9E_SET_MIN_GF_INTERVAL, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_max_gf_interval(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.max_gf_interval = CAST(VP9E_SET_MAX_GF_INTERVAL, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_frame_periodic_boost(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.frame_periodic_boost = CAST(VP9E_SET_FRAME_PERIODIC_BOOST, args);
return update_extra_cfg(ctx, &extra_cfg);
}
@@ -736,17 +736,17 @@
}
priv->extra_cfg = default_extra_cfg;
- once(vp10_initialize_enc);
+ once(av1_initialize_enc);
res = validate_config(priv, &priv->cfg, &priv->extra_cfg);
if (res == VPX_CODEC_OK) {
set_encoder_config(&priv->oxcf, &priv->cfg, &priv->extra_cfg);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
priv->oxcf.use_highbitdepth =
(ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
#endif
- priv->cpi = vp10_create_compressor(&priv->oxcf, priv->buffer_pool);
+ priv->cpi = av1_create_compressor(&priv->oxcf, priv->buffer_pool);
if (priv->cpi == NULL)
res = VPX_CODEC_MEM_ERROR;
else
@@ -759,7 +759,7 @@
static aom_codec_err_t encoder_destroy(aom_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
- vp10_remove_compressor(ctx->cpi);
+ av1_remove_compressor(ctx->cpi);
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
#endif
@@ -796,7 +796,7 @@
if (ctx->oxcf.mode != new_mode) {
ctx->oxcf.mode = new_mode;
- vp10_change_config(ctx->cpi, &ctx->oxcf);
+ av1_change_config(ctx->cpi, &ctx->oxcf);
}
}
@@ -875,7 +875,7 @@
return index_sz;
}
-// vp10 uses 10,000,000 ticks/second as time stamp
+// av1 uses 10,000,000 ticks/second as time stamp
#define TICKS_PER_SEC 10000000LL
static int64_t timebase_units_to_ticks(const aom_rational_t *timebase,
@@ -889,7 +889,7 @@
return (n * timebase->den + round) / timebase->num / TICKS_PER_SEC;
}
-static aom_codec_frame_flags_t get_frame_pkt_flags(const VP10_COMP *cpi,
+static aom_codec_frame_flags_t get_frame_pkt_flags(const AV1_COMP *cpi,
unsigned int lib_flags) {
aom_codec_frame_flags_t flags = lib_flags << 16;
@@ -907,7 +907,7 @@
aom_enc_frame_flags_t flags,
unsigned long deadline) {
aom_codec_err_t res = VPX_CODEC_OK;
- VP10_COMP *const cpi = ctx->cpi;
+ AV1_COMP *const cpi = ctx->cpi;
const aom_rational_t *const timebase = &ctx->cfg.g_timebase;
size_t data_sz;
@@ -942,7 +942,7 @@
return VPX_CODEC_INVALID_PARAM;
}
- vp10_apply_encoding_flags(cpi, flags);
+ av1_apply_encoding_flags(cpi, flags);
// Handle fixed keyframe intervals
if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
@@ -971,7 +971,7 @@
// Store the original flags in to the frame buffer. Will extract the
// key frame flag when we actually encode this frame.
- if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+ if (av1_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
dst_time_stamp, dst_end_time_stamp)) {
res = update_error_state(ctx, &cpi->common.error);
}
@@ -998,7 +998,7 @@
}
while (cx_data_sz >= ctx->cx_data_sz / 2 &&
- -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+ -1 != av1_get_compressed_data(cpi, &lib_flags, &size, cx_data,
&dst_time_stamp, &dst_end_time_stamp,
!img)) {
if (size) {
@@ -1094,8 +1094,8 @@
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- vp10_set_reference_enc(ctx->cpi,
- ref_frame_to_vp10_reframe(frame->frame_type), &sd);
+ av1_set_reference_enc(ctx->cpi,
+ ref_frame_to_av1_reframe(frame->frame_type), &sd);
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -1110,8 +1110,8 @@
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- vp10_copy_reference_enc(ctx->cpi,
- ref_frame_to_vp10_reframe(frame->frame_type), &sd);
+ av1_copy_reference_enc(ctx->cpi,
+ ref_frame_to_av1_reframe(frame->frame_type), &sd);
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -1143,7 +1143,7 @@
static aom_image_t *encoder_get_preview(aom_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
- if (vp10_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
+ if (av1_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
yuvconfig2image(&ctx->preview_img, &sd, NULL);
return &ctx->preview_img;
} else {
@@ -1156,7 +1156,7 @@
(void)ctx;
(void)args;
- // TODO(yaowu): Need to re-implement and test for VP10.
+ // TODO(yaowu): Need to re-implement and test for AV1.
return VPX_CODEC_INVALID_PARAM;
}
@@ -1165,7 +1165,7 @@
aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
if (map) {
- if (!vp10_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ if (!av1_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
(int)map->cols))
return VPX_CODEC_OK;
else
@@ -1180,7 +1180,7 @@
aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
if (map) {
- if (!vp10_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ if (!av1_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
(int)map->cols))
return VPX_CODEC_OK;
else
@@ -1196,7 +1196,7 @@
if (mode) {
const int res =
- vp10_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
+ av1_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
(VPX_SCALING)mode->v_scaling_mode);
return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
} else {
@@ -1216,28 +1216,28 @@
static aom_codec_err_t ctrl_set_tune_content(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.content = CAST(VP9E_SET_TUNE_CONTENT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_color_space(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.color_space = CAST(VP9E_SET_COLOR_SPACE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_color_range(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.color_range = CAST(VP9E_SET_COLOR_RANGE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static aom_codec_err_t ctrl_set_render_size(aom_codec_alg_priv_t *ctx,
va_list args) {
- struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+ struct av1_extracfg extra_cfg = ctx->extra_cfg;
int *const render_size = va_arg(args, int *);
extra_cfg.render_width = render_size[0];
extra_cfg.render_height = render_size[1];
@@ -1345,7 +1345,7 @@
9999, // kf_max_dist
// TODO(yunqingwang): Spatial/temporal scalability are not supported
- // in VP10. The following 10 parameters are not used, which should
+ // in AV1. The following 10 parameters are not used, which should
// be removed later.
1, // ss_number_layers
{ 0 },
@@ -1363,10 +1363,10 @@
#ifndef VERSION_STRING
#define VERSION_STRING
#endif
-CODEC_INTERFACE(aom_codec_vp10_cx) = {
- "WebM Project VP10 Encoder" VERSION_STRING,
+CODEC_INTERFACE(aom_codec_av1_cx) = {
+ "WebM Project AV1 Encoder" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
VPX_CODEC_CAP_HIGHBITDEPTH |
#endif
VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // aom_codec_caps_t
diff --git a/av1/av1_dx.mk b/av1/av1_dx.mk
index f3d455e..d1628de 100644
--- a/av1/av1_dx.mk
+++ b/av1/av1_dx.mk
@@ -8,26 +8,26 @@
## be found in the AUTHORS file in the root of the source tree.
##
-VP10_DX_EXPORTS += exports_dec
+AV1_DX_EXPORTS += exports_dec
-VP10_DX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_DX_SRCS-no += $(VP10_COMMON_SRCS-no)
-VP10_DX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_DX_SRCS_REMOVE-no += $(VP10_COMMON_SRCS_REMOVE-no)
+AV1_DX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_DX_SRCS-no += $(AV1_COMMON_SRCS-no)
+AV1_DX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_DX_SRCS_REMOVE-no += $(AV1_COMMON_SRCS_REMOVE-no)
-VP10_DX_SRCS-yes += av1_dx_iface.c
+AV1_DX_SRCS-yes += av1_dx_iface.c
-VP10_DX_SRCS-yes += decoder/decodemv.c
-VP10_DX_SRCS-yes += decoder/decodeframe.c
-VP10_DX_SRCS-yes += decoder/decodeframe.h
-VP10_DX_SRCS-yes += decoder/detokenize.c
-VP10_DX_SRCS-yes += decoder/decodemv.h
-VP10_DX_SRCS-yes += decoder/detokenize.h
-VP10_DX_SRCS-yes += decoder/dthread.c
-VP10_DX_SRCS-yes += decoder/dthread.h
-VP10_DX_SRCS-yes += decoder/decoder.c
-VP10_DX_SRCS-yes += decoder/decoder.h
-VP10_DX_SRCS-yes += decoder/dsubexp.c
-VP10_DX_SRCS-yes += decoder/dsubexp.h
+AV1_DX_SRCS-yes += decoder/decodemv.c
+AV1_DX_SRCS-yes += decoder/decodeframe.c
+AV1_DX_SRCS-yes += decoder/decodeframe.h
+AV1_DX_SRCS-yes += decoder/detokenize.c
+AV1_DX_SRCS-yes += decoder/decodemv.h
+AV1_DX_SRCS-yes += decoder/detokenize.h
+AV1_DX_SRCS-yes += decoder/dthread.c
+AV1_DX_SRCS-yes += decoder/dthread.h
+AV1_DX_SRCS-yes += decoder/decoder.c
+AV1_DX_SRCS-yes += decoder/decoder.h
+AV1_DX_SRCS-yes += decoder/dsubexp.c
+AV1_DX_SRCS-yes += decoder/dsubexp.h
-VP10_DX_SRCS-yes := $(filter-out $(VP10_DX_SRCS_REMOVE-yes),$(VP10_DX_SRCS-yes))
+AV1_DX_SRCS-yes := $(filter-out $(AV1_DX_SRCS_REMOVE-yes),$(AV1_DX_SRCS-yes))
diff --git a/av1/av1_dx_iface.c b/av1/av1_dx_iface.c
index f4fee97..bccbb88 100644
--- a/av1/av1_dx_iface.c
+++ b/av1/av1_dx_iface.c
@@ -30,7 +30,7 @@
#include "av1/av1_iface_common.h"
-typedef aom_codec_stream_info_t vp10_stream_info_t;
+typedef aom_codec_stream_info_t av1_stream_info_t;
// This limit is due to framebuffer numbers.
// TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
@@ -44,7 +44,7 @@
struct aom_codec_alg_priv {
aom_codec_priv_t base;
aom_codec_dec_cfg_t cfg;
- vp10_stream_info_t si;
+ av1_stream_info_t si;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
aom_decrypt_cb decrypt_cb;
@@ -73,7 +73,7 @@
// BufferPool that holds all reference frames. Shared by all the FrameWorkers.
BufferPool *buffer_pool;
- // External frame buffer info to save for VP10 common.
+ // External frame buffer info to save for AV1 common.
void *ext_priv; // Private data associated with the external frame buffers.
aom_get_frame_buffer_cb_fn_t get_ext_fb_cb;
aom_release_frame_buffer_cb_fn_t release_ext_fb_cb;
@@ -118,8 +118,8 @@
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
aom_get_worker_interface()->end(worker);
- vp10_remove_common(&frame_worker_data->pbi->common);
- vp10_decoder_remove(frame_worker_data->pbi);
+ av1_remove_common(&frame_worker_data->pbi->common);
+ av1_decoder_remove(frame_worker_data->pbi);
aom_free(frame_worker_data->scratch_buffer);
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&frame_worker_data->stats_mutex);
@@ -133,8 +133,8 @@
}
if (ctx->buffer_pool) {
- vp10_free_ref_frame_buffers(ctx->buffer_pool);
- vp10_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
+ av1_free_ref_frame_buffers(ctx->buffer_pool);
+ av1_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
}
aom_free(ctx->frame_workers);
@@ -187,7 +187,7 @@
int error_resilient;
struct aom_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
const int frame_marker = aom_rb_read_literal(&rb, 2);
- const BITSTREAM_PROFILE profile = vp10_read_profile(&rb);
+ const BITSTREAM_PROFILE profile = av1_read_profile(&rb);
if (frame_marker != VPX_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
@@ -208,24 +208,24 @@
error_resilient = aom_rb_read_bit(&rb);
if (si->is_kf) {
- if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!av1_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
- vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+ av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
} else {
intra_only_flag = show_frame ? 0 : aom_rb_read_bit(&rb);
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
- if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!av1_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
}
rb.bit_offset += REF_FRAMES; // refresh_frame_flags
- vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+ av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
}
}
}
@@ -241,8 +241,8 @@
static aom_codec_err_t decoder_get_si(aom_codec_alg_priv_t *ctx,
aom_codec_stream_info_t *si) {
- const size_t sz = (si->sz >= sizeof(vp10_stream_info_t))
- ? sizeof(vp10_stream_info_t)
+ const size_t sz = (si->sz >= sizeof(av1_stream_info_t))
+ ? sizeof(av1_stream_info_t)
: sizeof(aom_codec_stream_info_t);
memcpy(si, &ctx->si, sz);
si->sz = (unsigned int)sz;
@@ -269,7 +269,7 @@
for (i = 0; i < ctx->num_frame_workers; ++i) {
VPxWorker *const worker = &ctx->frame_workers[i];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
- VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ AV1_COMMON *const cm = &frame_worker_data->pbi->common;
BufferPool *const pool = cm->buffer_pool;
cm->new_fb_idx = INVALID_IDX;
@@ -281,10 +281,10 @@
pool->release_fb_cb = ctx->release_ext_fb_cb;
pool->cb_priv = ctx->ext_priv;
} else {
- pool->get_fb_cb = vp10_get_frame_buffer;
- pool->release_fb_cb = vp10_release_frame_buffer;
+ pool->get_fb_cb = av1_get_frame_buffer;
+ pool->release_fb_cb = av1_release_frame_buffer;
- if (vp10_alloc_internal_frame_buffers(&pool->int_frame_buffers))
+ if (av1_alloc_internal_frame_buffers(&pool->int_frame_buffers))
aom_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to initialize internal frame buffers");
@@ -304,7 +304,7 @@
const uint8_t *data = frame_worker_data->data;
(void)arg2;
- frame_worker_data->result = vp10_receive_compressed_data(
+ frame_worker_data->result = av1_receive_compressed_data(
frame_worker_data->pbi, frame_worker_data->data_size, &data);
frame_worker_data->data_end = data;
@@ -316,14 +316,14 @@
VPxWorker *const worker = frame_worker_data->pbi->frame_worker_owner;
BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
// Signal all the other threads that are waiting for this frame.
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
frame_worker_data->frame_context_ready = 1;
lock_buffer_pool(pool);
frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
unlock_buffer_pool(pool);
frame_worker_data->pbi->need_resync = 1;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
return 0;
}
} else if (frame_worker_data->result != 0) {
@@ -380,7 +380,7 @@
return VPX_CODEC_MEM_ERROR;
}
frame_worker_data = (FrameWorkerData *)worker->data1;
- frame_worker_data->pbi = vp10_decoder_create(ctx->buffer_pool);
+ frame_worker_data->pbi = av1_decoder_create(ctx->buffer_pool);
if (frame_worker_data->pbi == NULL) {
set_error_detail(ctx, "Failed to allocate frame_worker_data");
return VPX_CODEC_MEM_ERROR;
@@ -428,7 +428,7 @@
}
static INLINE void check_resync(aom_codec_alg_priv_t *const ctx,
- const VP10Decoder *const pbi) {
+ const AV1Decoder *const pbi) {
// Clear resync flag if worker got a key frame or intra only frame.
if (ctx->need_resync == 1 && pbi->need_resync == 0 &&
(pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME))
@@ -482,7 +482,7 @@
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
// Copy context from last worker thread to next worker thread.
if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
- vp10_frameworker_copy_context(
+ av1_frameworker_copy_context(
&ctx->frame_workers[ctx->next_submit_worker_id],
&ctx->frame_workers[ctx->last_submit_worker_id]);
@@ -537,8 +537,8 @@
check_resync(ctx, frame_worker_data->pbi);
- if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
- VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+ AV1_COMMON *const cm = &frame_worker_data->pbi->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
@@ -573,7 +573,7 @@
if (res != VPX_CODEC_OK) return res;
}
- res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
+ res = av1_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
ctx->decrypt_cb, ctx->decrypt_state);
if (res != VPX_CODEC_OK) return res;
@@ -721,8 +721,8 @@
frame_worker_data->received_frame = 0;
check_resync(ctx, frame_worker_data->pbi);
}
- if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
- VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+ AV1_COMMON *const cm = &frame_worker_data->pbi->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
release_last_output_frame(ctx);
ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
@@ -777,7 +777,7 @@
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
- return vp10_set_reference_dec(&frame_worker_data->pbi->common,
+ return av1_set_reference_dec(&frame_worker_data->pbi->common,
(VPX_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -800,7 +800,7 @@
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
- return vp10_copy_reference_dec(frame_worker_data->pbi,
+ return av1_copy_reference_dec(frame_worker_data->pbi,
(VPX_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -908,7 +908,7 @@
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
frame_size[0] = cm->width;
frame_size[1] = cm->height;
return VPX_CODEC_OK;
@@ -935,7 +935,7 @@
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
render_size[0] = cm->render_width;
render_size[1] = cm->render_height;
return VPX_CODEC_OK;
@@ -956,7 +956,7 @@
if (worker) {
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
- const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+ const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
*bit_depth = cm->bit_depth;
return VPX_CODEC_OK;
} else {
@@ -1045,8 +1045,8 @@
#ifndef VERSION_STRING
#define VERSION_STRING
#endif
-CODEC_INTERFACE(aom_codec_vp10_dx) = {
- "WebM Project VP10 Decoder" VERSION_STRING,
+CODEC_INTERFACE(aom_codec_av1_dx) = {
+ "WebM Project AV1 Decoder" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
VPX_CODEC_CAP_DECODER |
VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // aom_codec_caps_t
diff --git a/av1/av1_iface_common.h b/av1/av1_iface_common.h
index 1a91c3b..b4a01af 100644
--- a/av1/av1_iface_common.h
+++ b/av1/av1_iface_common.h
@@ -8,8 +8,8 @@
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_VP10_IFACE_COMMON_H_
-#define VP10_VP10_IFACE_COMMON_H_
+#ifndef AV1_AV1_IFACE_COMMON_H_
+#define AV1_AV1_IFACE_COMMON_H_
#include "aom_ports/mem.h"
@@ -56,7 +56,7 @@
img->stride[VPX_PLANE_U] = yv12->uv_stride;
img->stride[VPX_PLANE_V] = yv12->uv_stride;
img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) {
// aom_image_t uses byte strides and a pointer to the first byte
// of the image.
@@ -71,7 +71,7 @@
img->stride[VPX_PLANE_V] = 2 * yv12->uv_stride;
img->stride[VPX_PLANE_ALPHA] = 2 * yv12->y_stride;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
img->bps = bps;
img->user_priv = user_priv;
img->img_data = yv12->buffer_alloc;
@@ -104,7 +104,7 @@
yv12->color_space = img->cs;
yv12->color_range = img->range;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
// In aom_image_t
// planes point to uint8 address of start of data
@@ -128,10 +128,10 @@
yv12->border = (yv12->y_stride - img->w) / 2;
#else
yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
yv12->subsampling_x = img->x_chroma_shift;
yv12->subsampling_y = img->y_chroma_shift;
return VPX_CODEC_OK;
}
-#endif // VP10_VP10_IFACE_COMMON_H_
+#endif // AV1_AV1_IFACE_COMMON_H_
diff --git a/av1/common/alloccommon.c b/av1/common/alloccommon.c
index 0cd5e7c..79264df 100644
--- a/av1/common/alloccommon.c
+++ b/av1/common/alloccommon.c
@@ -18,7 +18,7 @@
#include "av1/common/entropymv.h"
#include "av1/common/onyxc_int.h"
-void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) {
+void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
@@ -31,7 +31,7 @@
cm->MBs = cm->mb_rows * cm->mb_cols;
}
-static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
+static int alloc_seg_map(AV1_COMMON *cm, int seg_map_size) {
int i;
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
@@ -51,7 +51,7 @@
return 0;
}
-static void free_seg_map(VP10_COMMON *cm) {
+static void free_seg_map(AV1_COMMON *cm) {
int i;
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
@@ -66,7 +66,7 @@
}
}
-void vp10_free_ref_frame_buffers(BufferPool *pool) {
+void av1_free_ref_frame_buffers(BufferPool *pool) {
int i;
for (i = 0; i < FRAME_BUFFERS; ++i) {
@@ -81,7 +81,7 @@
}
}
-void vp10_free_context_buffers(VP10_COMMON *cm) {
+void av1_free_context_buffers(AV1_COMMON *cm) {
cm->free_mi(cm);
free_seg_map(cm);
aom_free(cm->above_context);
@@ -90,10 +90,10 @@
cm->above_seg_context = NULL;
}
-int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
+int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
int new_mi_size;
- vp10_set_mb_mi(cm, width, height);
+ av1_set_mb_mi(cm, width, height);
new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
if (cm->mi_alloc_size < new_mi_size) {
cm->free_mi(cm);
@@ -123,12 +123,12 @@
return 0;
fail:
- vp10_free_context_buffers(cm);
+ av1_free_context_buffers(cm);
return 1;
}
-void vp10_remove_common(VP10_COMMON *cm) {
- vp10_free_context_buffers(cm);
+void av1_remove_common(AV1_COMMON *cm) {
+ av1_free_context_buffers(cm);
aom_free(cm->fc);
cm->fc = NULL;
@@ -136,13 +136,13 @@
cm->frame_contexts = NULL;
}
-void vp10_init_context_buffers(VP10_COMMON *cm) {
+void av1_init_context_buffers(AV1_COMMON *cm) {
cm->setup_mi(cm);
if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
}
-void vp10_swap_current_and_last_seg_map(VP10_COMMON *cm) {
+void av1_swap_current_and_last_seg_map(AV1_COMMON *cm) {
// Swap indices.
const int tmp = cm->seg_map_idx;
cm->seg_map_idx = cm->prev_seg_map_idx;
diff --git a/av1/common/alloccommon.h b/av1/common/alloccommon.h
index 9370d4b..bbce0ad 100644
--- a/av1/common/alloccommon.h
+++ b/av1/common/alloccommon.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ALLOCCOMMON_H_
-#define VP10_COMMON_ALLOCCOMMON_H_
+#ifndef AV1_COMMON_ALLOCCOMMON_H_
+#define AV1_COMMON_ALLOCCOMMON_H_
#define INVALID_IDX -1 // Invalid buffer index.
@@ -18,26 +18,26 @@
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
struct BufferPool;
-void vp10_remove_common(struct VP10Common *cm);
+void av1_remove_common(struct AV1Common *cm);
-int vp10_alloc_context_buffers(struct VP10Common *cm, int width, int height);
-void vp10_init_context_buffers(struct VP10Common *cm);
-void vp10_free_context_buffers(struct VP10Common *cm);
+int av1_alloc_context_buffers(struct AV1Common *cm, int width, int height);
+void av1_init_context_buffers(struct AV1Common *cm);
+void av1_free_context_buffers(struct AV1Common *cm);
-void vp10_free_ref_frame_buffers(struct BufferPool *pool);
+void av1_free_ref_frame_buffers(struct BufferPool *pool);
-int vp10_alloc_state_buffers(struct VP10Common *cm, int width, int height);
-void vp10_free_state_buffers(struct VP10Common *cm);
+int av1_alloc_state_buffers(struct AV1Common *cm, int width, int height);
+void av1_free_state_buffers(struct AV1Common *cm);
-void vp10_set_mb_mi(struct VP10Common *cm, int width, int height);
+void av1_set_mb_mi(struct AV1Common *cm, int width, int height);
-void vp10_swap_current_and_last_seg_map(struct VP10Common *cm);
+void av1_swap_current_and_last_seg_map(struct AV1Common *cm);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_ALLOCCOMMON_H_
+#endif // AV1_COMMON_ALLOCCOMMON_H_
diff --git a/av1/common/arm/neon/iht4x4_add_neon.c b/av1/common/arm/neon/iht4x4_add_neon.c
index d7f3e9c..f228f3a 100644
--- a/av1/common/arm/neon/iht4x4_add_neon.c
+++ b/av1/common/arm/neon/iht4x4_add_neon.c
@@ -140,7 +140,7 @@
return;
}
-void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
+void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
int dest_stride, int tx_type) {
uint8x8_t d26u8, d27u8;
int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
@@ -157,7 +157,7 @@
switch (tx_type) {
case 0: // idct_idct is not supported. Fall back to C
- vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
+ av1_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
return;
break;
case 1: // iadst_idct
diff --git a/av1/common/arm/neon/iht8x8_add_neon.c b/av1/common/arm/neon/iht8x8_add_neon.c
index f90d192..457b829 100644
--- a/av1/common/arm/neon/iht8x8_add_neon.c
+++ b/av1/common/arm/neon/iht8x8_add_neon.c
@@ -472,7 +472,7 @@
return;
}
-void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
+void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
int dest_stride, int tx_type) {
int i;
uint8_t *d1, *d2;
@@ -495,7 +495,7 @@
switch (tx_type) {
case 0: // idct_idct is not supported. Fall back to C
- vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
+ av1_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
return;
break;
case 1: // iadst_idct
diff --git a/av1/common/av1_fwd_txfm.c b/av1/common/av1_fwd_txfm.c
index ac68eb9..7ae3b32 100644
--- a/av1/common/av1_fwd_txfm.c
+++ b/av1/common/av1_fwd_txfm.c
@@ -11,7 +11,7 @@
#include "av1/common/av1_fwd_txfm.h"
-void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@
}
}
-void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 4; ++r)
@@ -87,7 +87,7 @@
output[1] = 0;
}
-void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
int stride) {
int i, j;
tran_low_t intermediate[64];
@@ -173,7 +173,7 @@
}
}
-void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 8; ++r)
@@ -183,7 +183,7 @@
output[1] = 0;
}
-void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
@@ -363,7 +363,7 @@
}
}
-void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 16; ++r)
@@ -386,7 +386,7 @@
return rv;
}
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
tran_high_t step[32];
// Stage 1
step[0] = input[0] + input[(32 - 1)];
@@ -709,7 +709,7 @@
output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
}
-void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
int i, j;
tran_high_t output[32 * 32];
@@ -717,7 +717,7 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
@@ -726,7 +726,7 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
out[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -736,7 +736,7 @@
// Note that although we use dct_32_round in dct32 computation flow,
// this 2d fdct32x32 for rate-distortion optimization loop is operating
// within 16 bits precision.
-void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
int i, j;
tran_high_t output[32 * 32];
@@ -744,11 +744,11 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
// TODO(cd): see quality impact of only doing
// output[j * 32 + i] = (temp_out[j] + 1) >> 2;
- // PS: also change code in vp10_dsp/x86/vp10_dct_sse2.c
+ // PS: also change code in av1_dsp/x86/av1_dct_sse2.c
output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
@@ -756,12 +756,12 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
- vp10_fdct32(temp_in, temp_out, 1);
+ av1_fdct32(temp_in, temp_out, 1);
for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
}
}
-void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 32; ++r)
@@ -771,44 +771,44 @@
output[1] = 0;
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
int stride) {
- vp10_fdct4x4_c(input, output, stride);
+ av1_fdct4x4_c(input, output, stride);
}
-void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void av1_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
int stride) {
- vp10_fdct8x8_c(input, final_output, stride);
+ av1_fdct8x8_c(input, final_output, stride);
}
-void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+void av1_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
int stride) {
- vp10_fdct8x8_1_c(input, final_output, stride);
+ av1_fdct8x8_1_c(input, final_output, stride);
}
-void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
int stride) {
- vp10_fdct16x16_c(input, output, stride);
+ av1_fdct16x16_c(input, output, stride);
}
-void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
int stride) {
- vp10_fdct16x16_1_c(input, output, stride);
+ av1_fdct16x16_1_c(input, output, stride);
}
-void vp10_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
+void av1_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
int stride) {
- vp10_fdct32x32_c(input, out, stride);
+ av1_fdct32x32_c(input, out, stride);
}
-void vp10_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
+void av1_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
int stride) {
- vp10_fdct32x32_rd_c(input, out, stride);
+ av1_fdct32x32_rd_c(input, out, stride);
}
-void vp10_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+void av1_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
int stride) {
- vp10_fdct32x32_1_c(input, out, stride);
+ av1_fdct32x32_1_c(input, out, stride);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/av1_fwd_txfm.h b/av1/common/av1_fwd_txfm.h
index a8b17a3..db763e5 100644
--- a/av1/common/av1_fwd_txfm.h
+++ b/av1/common/av1_fwd_txfm.h
@@ -9,11 +9,11 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_VP10_FWD_TXFM_H_
-#define VP10_COMMON_VP10_FWD_TXFM_H_
+#ifndef AV1_COMMON_AV1_FWD_TXFM_H_
+#define AV1_COMMON_AV1_FWD_TXFM_H_
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/fwd_txfm.h"
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif // VP10_COMMON_VP10_FWD_TXFM_H_
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif // AV1_COMMON_AV1_FWD_TXFM_H_
diff --git a/av1/common/av1_inv_txfm.c b/av1/common/av1_inv_txfm.c
index 072593a..0a7f6b9 100644
--- a/av1/common/av1_inv_txfm.c
+++ b/av1/common/av1_inv_txfm.c
@@ -13,7 +13,7 @@
#include "av1/common/av1_inv_txfm.h"
-void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
0.5 shifts per pixel. */
int i;
@@ -65,7 +65,7 @@
}
}
-void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
+void av1_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
int dest_stride) {
int i;
tran_high_t a1, e1;
@@ -92,7 +92,7 @@
}
}
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step[4];
tran_high_t temp1, temp2;
// stage 1
@@ -112,7 +112,7 @@
output[3] = WRAPLOW(step[0] - step[3], 8);
}
-void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
int i, j;
@@ -120,7 +120,7 @@
// Rows
for (i = 0; i < 4; ++i) {
- vp10_idct4_c(input, outptr);
+ av1_idct4_c(input, outptr);
input += 4;
outptr += 4;
}
@@ -128,7 +128,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vp10_idct4_c(temp_in, temp_out);
+ av1_idct4_c(temp_in, temp_out);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 4));
@@ -136,7 +136,7 @@
}
}
-void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
int dest_stride) {
int i;
tran_high_t a1;
@@ -153,7 +153,7 @@
}
}
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
// stage 1
@@ -207,7 +207,7 @@
output[7] = WRAPLOW(step1[0] - step1[7], 8);
}
-void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
int i, j;
@@ -215,7 +215,7 @@
// First transform rows
for (i = 0; i < 8; ++i) {
- vp10_idct8_c(input, outptr);
+ av1_idct8_c(input, outptr);
input += 8;
outptr += 8;
}
@@ -223,7 +223,7 @@
// Then transform columns
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_idct8_c(temp_in, temp_out);
+ av1_idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -231,7 +231,7 @@
}
}
-void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8);
@@ -243,7 +243,7 @@
}
}
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[0];
@@ -280,7 +280,7 @@
output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3), 8);
}
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output) {
int s0, s1, s2, s3, s4, s5, s6, s7;
tran_high_t x0 = input[7];
@@ -357,7 +357,7 @@
output[7] = WRAPLOW(-x1, 8);
}
-void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -366,7 +366,7 @@
// First transform rows
// only first 4 row has non-zero coefs
for (i = 0; i < 4; ++i) {
- vp10_idct8_c(input, outptr);
+ av1_idct8_c(input, outptr);
input += 8;
outptr += 8;
}
@@ -374,7 +374,7 @@
// Then transform columns
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_idct8_c(temp_in, temp_out);
+ av1_idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -382,7 +382,7 @@
}
}
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
@@ -547,7 +547,7 @@
output[15] = WRAPLOW(step2[0] - step2[15], 8);
}
-void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -556,7 +556,7 @@
// First transform rows
for (i = 0; i < 16; ++i) {
- vp10_idct16_c(input, outptr);
+ av1_idct16_c(input, outptr);
input += 16;
outptr += 16;
}
@@ -564,7 +564,7 @@
// Then transform columns
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_idct16_c(temp_in, temp_out);
+ av1_idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -572,7 +572,7 @@
}
}
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -743,7 +743,7 @@
output[15] = WRAPLOW(-x1, 8);
}
-void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -753,7 +753,7 @@
// First transform rows. Since all non-zero dct coefficients are in
// upper-left 4x4 area, we only need to calculate first 4 rows here.
for (i = 0; i < 4; ++i) {
- vp10_idct16_c(input, outptr);
+ av1_idct16_c(input, outptr);
input += 16;
outptr += 16;
}
@@ -761,7 +761,7 @@
// Then transform columns
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_idct16_c(temp_in, temp_out);
+ av1_idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -769,7 +769,7 @@
}
}
-void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
int i, j;
tran_high_t a1;
@@ -782,7 +782,7 @@
}
}
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[32], step2[32];
tran_high_t temp1, temp2;
@@ -1149,7 +1149,7 @@
output[31] = WRAPLOW(step1[0] - step1[31], 8);
}
-void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[32 * 32];
tran_low_t *outptr = out;
@@ -1168,7 +1168,7 @@
zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
if (zero_coeff[0] | zero_coeff[1])
- vp10_idct32_c(input, outptr);
+ av1_idct32_c(input, outptr);
else
memset(outptr, 0, sizeof(tran_low_t) * 32);
input += 32;
@@ -1178,7 +1178,7 @@
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
- vp10_idct32_c(temp_in, temp_out);
+ av1_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1186,7 +1186,7 @@
}
}
-void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
@@ -1196,7 +1196,7 @@
// Rows
// only upper-left 8x8 has non-zero coeff
for (i = 0; i < 8; ++i) {
- vp10_idct32_c(input, outptr);
+ av1_idct32_c(input, outptr);
input += 32;
outptr += 32;
}
@@ -1204,7 +1204,7 @@
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
- vp10_idct32_c(temp_in, temp_out);
+ av1_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1212,7 +1212,7 @@
}
}
-void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
int i, j;
tran_high_t a1;
@@ -1227,8 +1227,8 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
0.5 shifts per pixel. */
@@ -1282,7 +1282,7 @@
}
}
-void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+void av1_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
int dest_stride, int bd) {
int i;
tran_high_t a1, e1;
@@ -1315,7 +1315,7 @@
}
}
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step[4];
tran_high_t temp1, temp2;
(void)bd;
@@ -1336,7 +1336,7 @@
output[3] = WRAPLOW(step[0] - step[3], bd);
}
-void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -1346,7 +1346,7 @@
// Rows
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct4_c(input, outptr, bd);
+ av1_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
@@ -1354,7 +1354,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vp10_highbd_idct4_c(temp_in, temp_out, bd);
+ av1_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1362,7 +1362,7 @@
}
}
-void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
int dest_stride, int bd) {
int i;
tran_high_t a1;
@@ -1382,7 +1382,7 @@
}
}
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
// stage 1
@@ -1400,7 +1400,7 @@
step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
// stage 2 & stage 3 - even half
- vp10_highbd_idct4_c(step1, step1, bd);
+ av1_highbd_idct4_c(step1, step1, bd);
// stage 2 - odd half
step2[4] = WRAPLOW(step1[4] + step1[5], bd);
@@ -1427,7 +1427,7 @@
output[7] = WRAPLOW(step1[0] - step1[7], bd);
}
-void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
@@ -1437,7 +1437,7 @@
// First transform rows.
for (i = 0; i < 8; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -1445,7 +1445,7 @@
// Then transform columns.
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1453,7 +1453,7 @@
}
}
-void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
tran_high_t a1;
@@ -1468,7 +1468,7 @@
}
}
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[0];
@@ -1506,7 +1506,7 @@
output[3] = WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3, bd), bd);
}
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[7];
@@ -1583,7 +1583,7 @@
output[7] = WRAPLOW(-x1, bd);
}
-void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -1594,14 +1594,14 @@
// First transform rows.
// Only first 4 row has non-zero coefs.
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
// Then transform columns.
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1609,7 +1609,7 @@
}
}
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
(void)bd;
@@ -1775,7 +1775,7 @@
output[15] = WRAPLOW(step2[0] - step2[15], bd);
}
-void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -1785,7 +1785,7 @@
// First transform rows.
for (i = 0; i < 16; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -1793,7 +1793,7 @@
// Then transform columns.
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1801,7 +1801,7 @@
}
}
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -1972,7 +1972,7 @@
output[15] = WRAPLOW(-x1, bd);
}
-void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -1983,7 +1983,7 @@
// First transform rows. Since all non-zero dct coefficients are in
// upper-left 4x4 area, we only need to calculate first 4 rows here.
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -1991,7 +1991,7 @@
// Then transform columns.
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1999,7 +1999,7 @@
}
}
-void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
tran_high_t a1;
@@ -2384,7 +2384,7 @@
output[31] = WRAPLOW(step1[0] - step1[31], bd);
}
-void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[32 * 32];
tran_low_t *outptr = out;
@@ -2422,7 +2422,7 @@
}
}
-void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
@@ -2448,7 +2448,7 @@
}
}
-void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
int a1;
@@ -2464,4 +2464,4 @@
dest += stride;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/av1_inv_txfm.h b/av1/common/av1_inv_txfm.h
index 032d7f1..eb5254e 100644
--- a/av1/common/av1_inv_txfm.h
+++ b/av1/common/av1_inv_txfm.h
@@ -41,7 +41,7 @@
return check_range(rv);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE tran_low_t highbd_check_range(tran_high_t input, int bd) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
// For valid highbitdepth streams, intermediate stage coefficients will
@@ -64,7 +64,7 @@
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
return highbd_check_range(rv, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EMULATE_HARDWARE
// When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -88,22 +88,22 @@
#define WRAPLOW(x, bd) ((int32_t)(x))
#endif // CONFIG_EMULATE_HARDWARE
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output);
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
int bd) {
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 094da4e..afa6820 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -1,7 +1,7 @@
-sub vp10_common_forward_decls() {
+sub av1_common_forward_decls() {
print <<EOF
/*
- * VP10
+ * AV1
*/
#include "aom/aom_integer.h"
@@ -19,7 +19,7 @@
struct yv12_buffer_config;
EOF
}
-forward_decls qw/vp10_common_forward_decls/;
+forward_decls qw/av1_common_forward_decls/;
# x86inc.asm had specific constraints. break it out so it's easy to disable.
# zero all the variables to avoid tricky else conditions.
@@ -57,314 +57,314 @@
#
# dct
#
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
# Note as optimized versions of these functions are added we need to add a check to ensure
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add/;
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add/;
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add/;
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add/;
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add/;
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add/;
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4/;
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4/;
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1/;
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1/;
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8/;
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8/;
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1/;
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1/;
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16/;
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16/;
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1/;
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1/;
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32/;
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32/;
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd/;
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd/;
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1/;
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1/;
- add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct4x4/;
+ add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct4x4/;
- add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8/;
+ add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8/;
- add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8_1/;
+ add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8_1/;
- add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16/;
+ add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16/;
- add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16_1/;
+ add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16_1/;
- add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32/;
+ add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32/;
- add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_rd/;
+ add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_rd/;
- add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_1/;
+ add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_1/;
} else {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add sse2/;
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add sse2/;
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add sse2/;
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add sse2/;
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add/;
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add/;
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4 sse2/;
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4 sse2/;
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1 sse2/;
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1 sse2/;
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8 sse2/;
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8 sse2/;
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1 sse2/;
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1 sse2/;
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16 sse2/;
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16 sse2/;
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1 sse2/;
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1 sse2/;
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32 sse2/;
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32 sse2/;
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd sse2/;
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd sse2/;
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1 sse2/;
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1 sse2/;
- add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct4x4 sse2/;
+ add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct4x4 sse2/;
- add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8 sse2/;
+ add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8 sse2/;
- add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct8x8_1/;
+ add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct8x8_1/;
- add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16 sse2/;
+ add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16 sse2/;
- add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct16x16_1/;
+ add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct16x16_1/;
- add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32 sse2/;
+ add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32 sse2/;
- add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_rd sse2/;
+ add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_rd sse2/;
- add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fdct32x32_1/;
+ add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fdct32x32_1/;
}
} else {
# Force C versions if CONFIG_EMULATE_HARDWARE is 1
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add/;
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add/;
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add/;
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add/;
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add/;
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add/;
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4/;
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4/;
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1/;
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1/;
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8/;
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8/;
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1/;
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1/;
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16/;
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16/;
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1/;
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1/;
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32/;
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32/;
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd/;
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd/;
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1/;
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1/;
} else {
- add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht4x4_16_add sse2 neon dspr2 msa/;
+ add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht4x4_16_add sse2 neon dspr2 msa/;
- add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
- specialize qw/vp10_iht8x8_64_add sse2 neon dspr2 msa/;
+ add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+ specialize qw/av1_iht8x8_64_add sse2 neon dspr2 msa/;
- add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
- specialize qw/vp10_iht16x16_256_add sse2 dspr2 msa/;
+ add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+ specialize qw/av1_iht16x16_256_add sse2 dspr2 msa/;
- add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4 sse2/;
+ add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4 sse2/;
- add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct4x4_1 sse2/;
+ add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct4x4_1 sse2/;
- add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8 sse2/;
+ add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8 sse2/;
- add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct8x8_1 sse2/;
+ add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct8x8_1 sse2/;
- add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16 sse2/;
+ add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16 sse2/;
- add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct16x16_1 sse2/;
+ add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct16x16_1 sse2/;
- add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32 sse2/;
+ add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32 sse2/;
- add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_rd sse2/;
+ add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_rd sse2/;
- add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fdct32x32_1 sse2/;
+ add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fdct32x32_1 sse2/;
}
}
# High bitdepth functions
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
#
# Sub Pixel Filters
#
- add_proto qw/void vp10_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve_copy/;
+ add_proto qw/void av1_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve_copy/;
- add_proto qw/void vp10_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve_avg/;
+ add_proto qw/void av1_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve_avg/;
- add_proto qw/void vp10_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8/, "$sse2_x86_64";
+ add_proto qw/void av1_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8/, "$sse2_x86_64";
- add_proto qw/void vp10_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_horiz/, "$sse2_x86_64";
+ add_proto qw/void av1_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_horiz/, "$sse2_x86_64";
- add_proto qw/void vp10_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_vert/, "$sse2_x86_64";
+ add_proto qw/void av1_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_vert/, "$sse2_x86_64";
- add_proto qw/void vp10_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_avg/, "$sse2_x86_64";
+ add_proto qw/void av1_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_avg/, "$sse2_x86_64";
- add_proto qw/void vp10_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+ add_proto qw/void av1_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
- add_proto qw/void vp10_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vp10_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+ add_proto qw/void av1_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/av1_highbd_convolve8_avg_vert/, "$sse2_x86_64";
#
# dct
#
# Note as optimized versions of these functions are added we need to add a check to ensure
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
- add_proto qw/void vp10_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht4x4_16_add/;
+ add_proto qw/void av1_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht4x4_16_add/;
- add_proto qw/void vp10_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
- specialize qw/vp10_highbd_iht8x8_64_add/;
+ add_proto qw/void av1_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+ specialize qw/av1_highbd_iht8x8_64_add/;
- add_proto qw/void vp10_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
- specialize qw/vp10_highbd_iht16x16_256_add/;
+ add_proto qw/void av1_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
+ specialize qw/av1_highbd_iht16x16_256_add/;
}
#
# Encoder functions below this point.
#
-if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
# ENCODEMB INVOKE
if (aom_config("CONFIG_AOM_QM") eq "yes") {
- if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
# the transform coefficients are held in 32-bit
- # values, so the assembler code for vp10_block_error can no longer be used.
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error/;
+ # values, so the assembler code for av1_block_error can no longer be used.
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error/;
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- specialize qw/vp10_fdct8x8_quant/;
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ specialize qw/av1_fdct8x8_quant/;
} else {
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
- add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
- specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
+ add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+ specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
}
} else {
- if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
# the transform coefficients are held in 32-bit
- # values, so the assembler code for vp10_block_error can no longer be used.
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error/;
+ # values, so the assembler code for av1_block_error can no longer be used.
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error/;
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp/;
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp/;
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp_32x32/;
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp_32x32/;
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_fdct8x8_quant/;
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_fdct8x8_quant/;
} else {
- add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
- specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
+ add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+ specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
- add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
- specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
+ add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+ specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
- add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp neon sse2/, "$ssse3_x86_64_x86inc";
+ add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp neon sse2/, "$ssse3_x86_64_x86inc";
- add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_quantize_fp_32x32/, "$ssse3_x86_64_x86inc";
+ add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_quantize_fp_32x32/, "$ssse3_x86_64_x86inc";
- add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_fdct8x8_quant sse2 ssse3 neon/;
+ add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_fdct8x8_quant sse2 ssse3 neon/;
}
}
@@ -372,269 +372,269 @@
# fdct functions
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht4x4 sse2/;
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht4x4 sse2/;
- add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x8 sse2/;
+ add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x8 sse2/;
- add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x16 sse2/;
+ add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x16 sse2/;
- add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fwht4x4/, "$mmx_x86inc";
+ add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fwht4x4/, "$mmx_x86inc";
} else {
- add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht4x4 sse2 msa/;
+ add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht4x4 sse2 msa/;
- add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht8x8 sse2 msa/;
+ add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht8x8 sse2 msa/;
- add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_fht16x16 sse2 msa/;
+ add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_fht16x16 sse2 msa/;
- add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_fwht4x4 msa/, "$mmx_x86inc";
+ add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_fwht4x4 msa/, "$mmx_x86inc";
}
# Inverse transform
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
# Note as optimized versions of these functions are added we need to add a check to ensure
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
- add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_1_add/;
+ add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_1_add/;
- add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_16_add/;
+ add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_16_add/;
- add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_1_add/;
+ add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_1_add/;
- add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_64_add/;
+ add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_64_add/;
- add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_12_add/;
+ add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_12_add/;
- add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_1_add/;
+ add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_1_add/;
- add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_256_add/;
+ add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_256_add/;
- add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_10_add/;
+ add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_10_add/;
- add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1024_add/;
+ add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1024_add/;
- add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_34_add/;
+ add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_34_add/;
- add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1_add/;
+ add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1_add/;
- add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_1_add/;
+ add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_1_add/;
- add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_16_add/;
+ add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_16_add/;
- add_proto qw/void vp10_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct4x4_1_add/;
+ add_proto qw/void av1_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct4x4_1_add/;
- add_proto qw/void vp10_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_1_add/;
+ add_proto qw/void av1_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_1_add/;
- add_proto qw/void vp10_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_1_add/;
+ add_proto qw/void av1_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_1_add/;
- add_proto qw/void vp10_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct32x32_1024_add/;
+ add_proto qw/void av1_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct32x32_1024_add/;
- add_proto qw/void vp10_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct32x32_34_add/;
+ add_proto qw/void av1_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct32x32_34_add/;
- add_proto qw/void vp10_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct32x32_1_add/;
+ add_proto qw/void av1_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct32x32_1_add/;
- add_proto qw/void vp10_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_iwht4x4_1_add/;
+ add_proto qw/void av1_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_iwht4x4_1_add/;
- add_proto qw/void vp10_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_iwht4x4_16_add/;
+ add_proto qw/void av1_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_iwht4x4_16_add/;
# Force C versions if CONFIG_EMULATE_HARDWARE is 1
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct4x4_16_add/;
+ add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct4x4_16_add/;
- add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_64_add/;
+ add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_64_add/;
- add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_10_add/;
+ add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_10_add/;
- add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_256_add/;
+ add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_256_add/;
- add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_10_add/;
+ add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_10_add/;
} else {
- add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct4x4_16_add sse2/;
+ add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct4x4_16_add sse2/;
- add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_64_add sse2/;
+ add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_64_add sse2/;
- add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct8x8_10_add sse2/;
+ add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct8x8_10_add sse2/;
- add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_256_add sse2/;
+ add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_256_add sse2/;
- add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vp10_highbd_idct16x16_10_add sse2/;
+ add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/av1_highbd_idct16x16_10_add sse2/;
} # CONFIG_EMULATE_HARDWARE
} else {
# Force C versions if CONFIG_EMULATE_HARDWARE is 1
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_1_add/;
+ add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_1_add/;
- add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_16_add/;
+ add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_16_add/;
- add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_1_add/;
+ add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_1_add/;
- add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_64_add/;
+ add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_64_add/;
- add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_12_add/;
+ add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_12_add/;
- add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_1_add/;
+ add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_1_add/;
- add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_256_add/;
+ add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_256_add/;
- add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_10_add/;
+ add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_10_add/;
- add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1024_add/;
+ add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1024_add/;
- add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_34_add/;
+ add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_34_add/;
- add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1_add/;
+ add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1_add/;
- add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_1_add/;
+ add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_1_add/;
- add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_16_add/;
+ add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_16_add/;
} else {
- add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_1_add sse2/;
+ add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_1_add sse2/;
- add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct4x4_16_add sse2/;
+ add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct4x4_16_add sse2/;
- add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_1_add sse2/;
+ add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_1_add sse2/;
- add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_64_add sse2/;
+ add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_64_add sse2/;
- add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct8x8_12_add sse2/;
+ add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct8x8_12_add sse2/;
- add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_1_add sse2/;
+ add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_1_add sse2/;
- add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_256_add sse2/;
+ add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_256_add sse2/;
- add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct16x16_10_add sse2/;
+ add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct16x16_10_add sse2/;
- add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1024_add sse2/;
+ add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1024_add sse2/;
- add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_34_add sse2/;
+ add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_34_add sse2/;
- add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_idct32x32_1_add sse2/;
+ add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_idct32x32_1_add sse2/;
- add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_1_add/;
+ add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_1_add/;
- add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vp10_iwht4x4_16_add/;
+ add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/av1_iwht4x4_16_add/;
} # CONFIG_EMULATE_HARDWARE
-} # CONFIG_VPX_HIGHBITDEPTH
+} # CONFIG_AOM_HIGHBITDEPTH
#
# Motion search
#
-add_proto qw/int vp10_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
-specialize qw/vp10_full_search_sad sse3 sse4_1/;
-$vp10_full_search_sad_sse3=vp10_full_search_sadx3;
-$vp10_full_search_sad_sse4_1=vp10_full_search_sadx8;
+add_proto qw/int av1_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
+specialize qw/av1_full_search_sad sse3 sse4_1/;
+$av1_full_search_sad_sse3=av1_full_search_sadx3;
+$av1_full_search_sad_sse4_1=av1_full_search_sadx8;
-add_proto qw/int vp10_diamond_search_sad/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_diamond_search_sad/;
+add_proto qw/int av1_diamond_search_sad/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_diamond_search_sad/;
-add_proto qw/int vp10_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_full_range_search/;
+add_proto qw/int av1_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_full_range_search/;
-add_proto qw/void vp10_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-specialize qw/vp10_temporal_filter_apply sse2 msa/;
+add_proto qw/void av1_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+specialize qw/av1_temporal_filter_apply sse2 msa/;
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
# ENCODEMB INVOKE
- add_proto qw/int64_t vp10_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
- specialize qw/vp10_highbd_block_error sse2/;
+ add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
+ specialize qw/av1_highbd_block_error sse2/;
if (aom_config("CONFIG_AOM_QM") eq "yes") {
- add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+ add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+ add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
} else {
- add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_highbd_quantize_fp/;
+ add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_highbd_quantize_fp/;
- add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp10_highbd_quantize_fp_32x32/;
+ add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/av1_highbd_quantize_fp_32x32/;
}
# fdct functions
- add_proto qw/void vp10_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht4x4/;
+ add_proto qw/void av1_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht4x4/;
- add_proto qw/void vp10_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht8x8/;
+ add_proto qw/void av1_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht8x8/;
- add_proto qw/void vp10_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
- specialize qw/vp10_highbd_fht16x16/;
+ add_proto qw/void av1_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+ specialize qw/av1_highbd_fht16x16/;
- add_proto qw/void vp10_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vp10_highbd_fwht4x4/;
+ add_proto qw/void av1_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/av1_highbd_fwht4x4/;
- add_proto qw/void vp10_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
- specialize qw/vp10_highbd_temporal_filter_apply/;
+ add_proto qw/void av1_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+ specialize qw/av1_highbd_temporal_filter_apply/;
}
-# End vp10_high encoder functions
+# End av1_high encoder functions
}
# end encoder functions
diff --git a/av1/common/blockd.c b/av1/common/blockd.c
index 1ea443a..2ebee03 100644
--- a/av1/common/blockd.c
+++ b/av1/common/blockd.c
@@ -11,7 +11,7 @@
#include "av1/common/blockd.h"
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b) {
if (b == 0 || b == 2) {
if (!left_mi || is_inter_block(&left_mi->mbmi)) return DC_PRED;
@@ -23,7 +23,7 @@
}
}
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *above_mi, int b) {
if (b == 0 || b == 1) {
if (!above_mi || is_inter_block(&above_mi->mbmi)) return DC_PRED;
@@ -35,7 +35,7 @@
}
}
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -74,17 +74,17 @@
}
}
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
BLOCK_SIZE bsize,
foreach_transformed_block_visitor visit,
void *arg) {
int plane;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
}
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
int aoff, int loff) {
ENTROPY_CONTEXT *const a = pd->above_context + aoff;
@@ -121,7 +121,7 @@
}
}
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index c57827a..5884182 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_BLOCKD_H_
-#define VP10_COMMON_BLOCKD_H_
+#ifndef AV1_COMMON_BLOCKD_H_
+#define AV1_COMMON_BLOCKD_H_
#include "./aom_config.h"
@@ -110,10 +110,10 @@
return mbmi->ref_frame[1] > INTRA_FRAME;
}
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b);
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *above_mi, int b);
enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };
@@ -154,7 +154,7 @@
typedef struct RefBuffer {
// TODO(dkovalev): idx is not really required and should be removed, now it
- // is used in vp10_onyxd_if.c
+ // is used in av1_onyxd_if.c
int idx;
YV12_BUFFER_CONFIG *buf;
struct scale_factors sf;
@@ -199,7 +199,7 @@
PARTITION_CONTEXT *above_seg_context;
PARTITION_CONTEXT left_seg_context[8];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
/* Bit depth: 8, 10, 12 */
int bd;
#endif
@@ -241,7 +241,7 @@
return mbmi->tx_type;
}
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
int xss, int yss) {
@@ -281,16 +281,16 @@
BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, void *arg);
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg);
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
BLOCK_SIZE bsize,
foreach_transformed_block_visitor visit,
void *arg);
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
int aoff, int loff);
@@ -298,4 +298,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_BLOCKD_H_
+#endif // AV1_COMMON_BLOCKD_H_
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index 38b6d8c..0399e0f 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -33,8 +33,8 @@
#define BS MI_SIZE *MI_BLOCK_SIZE
// Iterate over blocks within a superblock
-static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
- const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void av1_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
+ const AV1_COMMON *cm, MACROBLOCKD *xd,
MODE_INFO *const *mi_8x8, int xpos, int ypos) {
// Temporary buffer (to allow SIMD parallelism)
uint8_t buf_unaligned[BS * BS + 15];
@@ -61,7 +61,7 @@
has_bottom &= y != MI_BLOCK_SIZE - 1;
has_right &= x != MI_BLOCK_SIZE - 1;
#endif
- vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+ av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
clpf_block(
xd->plane[p].dst.buf, CLPF_ALLOW_PIXEL_PARALLELISM
? buf + y * MI_SIZE * BS + x * MI_SIZE
@@ -79,7 +79,7 @@
for (x = 0; x < MI_BLOCK_SIZE && xpos + x < cm->mi_cols; x++) {
const MB_MODE_INFO *mbmi =
&mi_8x8[(ypos + y) * cm->mi_stride + xpos + x]->mbmi;
- vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+ av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
if (!mbmi->skip) {
int i = 0;
for (i = 0; i<MI_SIZE>> xd->plane[p].subsampling_y; i++)
@@ -94,11 +94,11 @@
}
// Iterate over the superblocks of an entire frame
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
MACROBLOCKD *xd) {
int x, y;
for (y = 0; y < cm->mi_rows; y += MI_BLOCK_SIZE)
for (x = 0; x < cm->mi_cols; x += MI_BLOCK_SIZE)
- vp10_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
+ av1_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
}
diff --git a/av1/common/clpf.h b/av1/common/clpf.h
index 683b25f..d4587f3 100644
--- a/av1/common/clpf.h
+++ b/av1/common/clpf.h
@@ -8,8 +8,8 @@
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_CLPF_H_
-#define VP10_COMMON_CLPF_H_
+#ifndef AV1_COMMON_CLPF_H_
+#define AV1_COMMON_CLPF_H_
#include "av1/common/reconinter.h"
@@ -21,7 +21,7 @@
#define CLPF_FILTER_ALL_PLANES \
0 // 1 = filter both luma and chroma, 0 = filter only luma
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
MACROBLOCKD *xd);
#endif
diff --git a/av1/common/common.h b/av1/common/common.h
index e97e15b..f39f4f8 100644
--- a/av1/common/common.h
+++ b/av1/common/common.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_COMMON_H_
-#define VP10_COMMON_COMMON_H_
+#ifndef AV1_COMMON_COMMON_H_
+#define AV1_COMMON_COMMON_H_
/* Interface header for common constant data structures and lookup tables */
@@ -27,21 +27,21 @@
#endif
// Only need this for fixed-size arrays, for structs just assign.
-#define vp10_copy(dest, src) \
+#define av1_copy(dest, src) \
{ \
assert(sizeof(dest) == sizeof(src)); \
memcpy(dest, src, sizeof(src)); \
}
// Use this for variably-sized arrays.
-#define vp10_copy_array(dest, src, n) \
+#define av1_copy_array(dest, src, n) \
{ \
assert(sizeof(*dest) == sizeof(*src)); \
memcpy(dest, src, n * sizeof(*src)); \
}
-#define vp10_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp10_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
+#define av1_zero(dest) memset(&(dest), 0, sizeof(dest))
+#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
static INLINE int get_unsigned_bits(unsigned int num_values) {
return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -66,9 +66,9 @@
} while (0)
#endif
// TODO(yaowu: validate the usage of these codes or develop new ones.)
-#define VP10_SYNC_CODE_0 0x49
-#define VP10_SYNC_CODE_1 0x83
-#define VP10_SYNC_CODE_2 0x43
+#define AV1_SYNC_CODE_0 0x49
+#define AV1_SYNC_CODE_1 0x83
+#define AV1_SYNC_CODE_2 0x43
#define VPX_FRAME_MARKER 0x2
@@ -76,4 +76,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_COMMON_H_
+#endif // AV1_COMMON_COMMON_H_
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index d0885f1..a1db52a 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_COMMON_DATA_H_
-#define VP10_COMMON_COMMON_DATA_H_
+#ifndef AV1_COMMON_COMMON_DATA_H_
+#define AV1_COMMON_COMMON_DATA_H_
#include "av1/common/enums.h"
#include "aom/aom_integer.h"
@@ -175,4 +175,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_COMMON_DATA_H_
+#endif // AV1_COMMON_COMMON_DATA_H_
diff --git a/av1/common/debugmodes.c b/av1/common/debugmodes.c
index 0052fc4..d7b31c1 100644
--- a/av1/common/debugmodes.c
+++ b/av1/common/debugmodes.c
@@ -14,7 +14,7 @@
#include "av1/common/blockd.h"
#include "av1/common/onyxc_int.h"
-static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) {
+static void log_frame_info(AV1_COMMON *cm, const char *str, FILE *f) {
fprintf(f, "%s", str);
fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame,
cm->show_frame, cm->base_qindex);
@@ -23,7 +23,7 @@
* and uses the passed in member offset to print out the value of an integer
* for each mbmi member value in the mi structure.
*/
-static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
+static void print_mi_data(AV1_COMMON *cm, FILE *file, const char *descriptor,
size_t member_offset) {
int mi_row, mi_col;
MODE_INFO **mi = cm->mi_grid_visible;
@@ -44,7 +44,7 @@
fprintf(file, "\n");
}
-void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) {
+void av1_print_modes_and_motion_vectors(AV1_COMMON *cm, const char *file) {
int mi_row;
int mi_col;
FILE *mvs = fopen(file, "a");
diff --git a/av1/common/dering.c b/av1/common/dering.c
index 948e77e..0717e68 100644
--- a/av1/common/dering.c
+++ b/av1/common/dering.c
@@ -28,7 +28,7 @@
return clamp(level, gi, MAX_DERING_LEVEL-1);
}
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col) {
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col) {
int r, c;
int maxc, maxr;
int skip = 1;
@@ -46,7 +46,7 @@
return skip;
}
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
MACROBLOCKD *xd, int global_level) {
int r, c;
int sbr, sbc;
@@ -62,7 +62,7 @@
nvsb = (cm->mi_rows + MI_BLOCK_SIZE - 1)/MI_BLOCK_SIZE;
nhsb = (cm->mi_cols + MI_BLOCK_SIZE - 1)/MI_BLOCK_SIZE;
bskip = aom_malloc(sizeof(*bskip)*cm->mi_rows*cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+ av1_setup_dst_planes(xd->plane, frame, 0, 0);
for (pli = 0; pli < 3; pli++) {
dec[pli] = xd->plane[pli].subsampling_x;
bsize[pli] = 8 >> dec[pli];
@@ -72,7 +72,7 @@
src[pli] = aom_malloc(sizeof(*src)*cm->mi_rows*cm->mi_cols*64);
for (r = 0; r < bsize[pli]*cm->mi_rows; ++r) {
for (c = 0; c < bsize[pli]*cm->mi_cols; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
src[pli][r * stride + c] =
CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
@@ -81,7 +81,7 @@
#endif
src[pli][r * stride + c] =
xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
}
@@ -127,7 +127,7 @@
cm->mi_cols, threshold, OD_DERING_NO_CHECK_OVERLAP, coeff_shift);
for (r = 0; r < bsize[pli]*nvb; ++r) {
for (c = 0; c < bsize[pli]*nhb; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
[xd->plane[pli].dst.stride*(bsize[pli]*MI_BLOCK_SIZE*sbr + r)
@@ -139,7 +139,7 @@
(bsize[pli]*MI_BLOCK_SIZE*sbr + r) +
sbc*bsize[pli]*MI_BLOCK_SIZE + c] =
dst[r * MI_BLOCK_SIZE * bsize[pli] + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
}
diff --git a/av1/common/dering.h b/av1/common/dering.h
index 98a6f93..a46e207 100644
--- a/av1/common/dering.h
+++ b/av1/common/dering.h
@@ -8,8 +8,8 @@
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_DERING_H_
-#define VP10_COMMON_DERING_H_
+#ifndef AV1_COMMON_DERING_H_
+#define AV1_COMMON_DERING_H_
#include "av1/common/od_dering.h"
#include "av1/common/onyxc_int.h"
@@ -29,15 +29,15 @@
#define DERING_REFINEMENT_LEVELS 4
int compute_level_from_index(int global_level, int gi);
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col);
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col);
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
MACROBLOCKD *xd, int global_level);
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
- VP10_COMMON *cm,
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+ AV1_COMMON *cm,
MACROBLOCKD *xd);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_DERING_H_
+#endif // AV1_COMMON_DERING_H_
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index f013376..d433bfb 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -17,7 +17,7 @@
#include "aom/aom_integer.h"
// Unconstrained Node Tree
-const aom_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
2,
6, // 0 = LOW_VAL
-TWO_TOKEN,
@@ -36,33 +36,33 @@
-CATEGORY6_TOKEN // 7 = CAT_FIVE
};
-const aom_prob vp10_cat1_prob[] = { 159 };
-const aom_prob vp10_cat2_prob[] = { 165, 145 };
-const aom_prob vp10_cat3_prob[] = { 173, 148, 140 };
-const aom_prob vp10_cat4_prob[] = { 176, 155, 140, 135 };
-const aom_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 };
-const aom_prob vp10_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
+const aom_prob av1_cat1_prob[] = { 159 };
+const aom_prob av1_cat2_prob[] = { 165, 145 };
+const aom_prob av1_cat3_prob[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
196, 177, 153, 140, 133, 130, 129 };
-#if CONFIG_VPX_HIGHBITDEPTH
-const aom_prob vp10_cat1_prob_high10[] = { 159 };
-const aom_prob vp10_cat2_prob_high10[] = { 165, 145 };
-const aom_prob vp10_cat3_prob_high10[] = { 173, 148, 140 };
-const aom_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 };
-const aom_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
-const aom_prob vp10_cat6_prob_high10[] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const aom_prob av1_cat1_prob_high10[] = { 159 };
+const aom_prob av1_cat2_prob_high10[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high10[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high10[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high10[] = {
255, 255, 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
};
-const aom_prob vp10_cat1_prob_high12[] = { 159 };
-const aom_prob vp10_cat2_prob_high12[] = { 165, 145 };
-const aom_prob vp10_cat3_prob_high12[] = { 173, 148, 140 };
-const aom_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 };
-const aom_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
-const aom_prob vp10_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
+const aom_prob av1_cat1_prob_high12[] = { 159 };
+const aom_prob av1_cat2_prob_high12[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high12[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high12[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
254, 252, 249, 243, 230, 196,
177, 153, 140, 133, 130, 129 };
#endif
-const uint8_t vp10_coefband_trans_8x8plus[1024] = {
+const uint8_t av1_coefband_trans_8x8plus[1024] = {
0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
// beyond MAXBAND_INDEX+1 all values are filled as 5
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -106,11 +106,11 @@
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
};
-const uint8_t vp10_coefband_trans_4x4[16] = {
+const uint8_t av1_coefband_trans_4x4[16] = {
0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
};
-const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
+const uint8_t av1_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
4, 5, 5, 5, 5, 5 };
// Model obtained from a 2-sided zero-centerd distribuition derived
@@ -125,9 +125,9 @@
// Every odd line in this table can be generated from the even lines
// by averaging :
-// vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] +
-// vp10_pareto8_full[l+1][node] ) >> 1;
-const aom_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
+// av1_pareto8_full[l][node] = (av1_pareto8_full[l-1][node] +
+// av1_pareto8_full[l+1][node] ) >> 1;
+const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
{ 3, 86, 128, 6, 86, 23, 88, 29 },
{ 6, 86, 128, 11, 87, 42, 91, 52 },
{ 9, 86, 129, 17, 88, 61, 94, 76 },
@@ -385,7 +385,7 @@
{ 255, 246, 247, 255, 239, 255, 253, 255 },
};
-static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -550,7 +550,7 @@
{ 8, 23, 61 } } } }
};
-static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -715,7 +715,7 @@
{ 1, 23, 41 } } } }
};
-static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -880,7 +880,7 @@
{ 1, 17, 31 } } } }
};
-static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
{ // Y plane
{ // Intra
{ // Band 0
@@ -1048,21 +1048,21 @@
static void extend_to_full_distribution(aom_prob *probs, aom_prob p) {
// TODO(aconverse): model[PIVOT_NODE] should never be zero.
// https://code.google.com/p/webm/issues/detail?id=1089
- memcpy(probs, vp10_pareto8_full[p == 0 ? 254 : p - 1],
+ memcpy(probs, av1_pareto8_full[p == 0 ? 254 : p - 1],
MODEL_NODES * sizeof(aom_prob));
}
-void vp10_model_to_full_probs(const aom_prob *model, aom_prob *full) {
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full) {
if (full != model)
memcpy(full, model, sizeof(aom_prob) * UNCONSTRAINED_NODES);
extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
}
-void vp10_default_coef_probs(VP10_COMMON *cm) {
- vp10_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
- vp10_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
- vp10_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
- vp10_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
+void av1_default_coef_probs(AV1_COMMON *cm) {
+ av1_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
+ av1_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
+ av1_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
+ av1_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
}
#define COEF_COUNT_SAT 24
@@ -1072,13 +1072,13 @@
#define COEF_COUNT_SAT_AFTER_KEY 24
#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
-static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
+static void adapt_coef_probs(AV1_COMMON *cm, TX_SIZE tx_size,
unsigned int count_sat,
unsigned int update_factor) {
const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
- vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
- const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
- vp10_coeff_count_model *counts = cm->counts.coef[tx_size];
+ av1_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
+ const av1_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
+ av1_coeff_count_model *counts = cm->counts.coef[tx_size];
unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
cm->counts.eob_branch[tx_size];
int i, j, k, l, m;
@@ -1101,7 +1101,7 @@
}
}
-void vp10_adapt_coef_probs(VP10_COMMON *cm) {
+void av1_adapt_coef_probs(AV1_COMMON *cm) {
TX_SIZE t;
unsigned int count_sat, update_factor;
diff --git a/av1/common/entropy.h b/av1/common/entropy.h
index 21a0987..fdfccf3 100644
--- a/av1/common/entropy.h
+++ b/av1/common/entropy.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ENTROPY_H_
-#define VP10_COMMON_ENTROPY_H_
+#ifndef AV1_COMMON_ENTROPY_H_
+#define AV1_COMMON_ENTROPY_H_
#include "aom/aom_integer.h"
#include "aom_dsp/prob.h"
@@ -43,7 +43,7 @@
#define ENTROPY_NODES 11
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_pt_energy_class[ENTROPY_TOKENS]);
#define CAT1_MIN_VAL 5
#define CAT2_MIN_VAL 7
@@ -53,27 +53,27 @@
#define CAT6_MIN_VAL 67
// Extra bit probabilities.
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob[14]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob[14]);
-#if CONFIG_VPX_HIGHBITDEPTH
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high10[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high10[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high10[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high10[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high10[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high10[16]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high12[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high12[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high12[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high12[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high12[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high12[18]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high10[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high10[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high10[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high10[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high10[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high10[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high12[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high12[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high12[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high12[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high12[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high12[18]);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#define EOB_MODEL_TOKEN 3
@@ -83,20 +83,20 @@
int len;
int base_val;
const int16_t *cost;
-} vp10_extra_bit;
+} av1_extra_bit;
// indexed by token value
-extern const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS];
-#if CONFIG_VPX_HIGHBITDEPTH
-extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS];
-extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
-#endif // CONFIG_VPX_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS];
+#if CONFIG_AOM_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS];
+extern const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS];
+#endif // CONFIG_AOM_HIGHBITDEPTH
#define DCT_MAX_VALUE 16384
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_MAX_VALUE_HIGH10 65536
#define DCT_MAX_VALUE_HIGH12 262144
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* Coefficients are predicted via a 3-dimensional probability table. */
@@ -127,28 +127,28 @@
// #define ENTROPY_STATS
typedef unsigned int
- vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
+ av1_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
typedef unsigned int
- vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
+ av1_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
#define SUBEXP_PARAM 4 /* Subexponential code parameter */
#define MODULUS_PARAM 13 /* Modulus parameter */
-struct VP10Common;
-void vp10_default_coef_probs(struct VP10Common *cm);
-void vp10_adapt_coef_probs(struct VP10Common *cm);
+struct AV1Common;
+void av1_default_coef_probs(struct AV1Common *cm);
+void av1_adapt_coef_probs(struct AV1Common *cm);
// This is the index in the scan order beyond which all coefficients for
// 8x8 transform and above are in the top band.
// This macro is currently unused but may be used by certain implementations
#define MAXBAND_INDEX 21
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_8x8plus[1024]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x4[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_8x8plus[1024]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x4[16]);
static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
- return tx_size == TX_4X4 ? vp10_coefband_trans_4x4
- : vp10_coefband_trans_8x8plus;
+ return tx_size == TX_4X4 ? av1_coefband_trans_4x4
+ : av1_coefband_trans_8x8plus;
}
// 128 lists of probabilities are stored for the following ONE node probs:
@@ -162,16 +162,16 @@
#define PIVOT_NODE 2 // which node is pivot
#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
-extern const aom_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
-extern const aom_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
+extern const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
+extern const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
-typedef aom_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
+typedef aom_prob av1_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
[UNCONSTRAINED_NODES];
-typedef unsigned int vp10_coeff_count_model
+typedef unsigned int av1_coeff_count_model
[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
-void vp10_model_to_full_probs(const aom_prob *model, aom_prob *full);
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full);
typedef char ENTROPY_CONTEXT;
@@ -211,4 +211,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ENTROPY_H_
+#endif // AV1_COMMON_ENTROPY_H_
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 0753288..97c542e 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -14,7 +14,7 @@
#include "av1/common/onyxc_int.h"
#include "av1/common/seg_common.h"
-const aom_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
+const aom_prob av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
{ {
// above = dc
{ 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc
@@ -147,7 +147,7 @@
} };
#if !CONFIG_MISC_FIXES
-const aom_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
+const aom_prob av1_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
{ 144, 11, 54, 157, 195, 130, 46, 58, 108 }, // y = dc
{ 118, 15, 123, 148, 131, 101, 44, 93, 131 }, // y = v
{ 113, 12, 23, 188, 226, 142, 26, 32, 125 }, // y = h
@@ -183,7 +183,7 @@
#if !CONFIG_MISC_FIXES
const aom_prob
- vp10_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
+ av1_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
// 8x8 -> 4x4
{ 158, 97, 94 }, // a/l both not split
{ 93, 24, 99 }, // a split, l not split
@@ -243,7 +243,7 @@
};
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const aom_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
+const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
-DC_PRED, 2, /* 0 = DC_NODE */
-TM_PRED, 4, /* 1 = TM_NODE */
-V_PRED, 6, /* 2 = V_NODE */
@@ -255,12 +255,12 @@
-D153_PRED, -D207_PRED /* 8 = D153_NODE */
};
-const aom_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
+const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
-INTER_OFFSET(ZEROMV), 2, -INTER_OFFSET(NEARESTMV), 4, -INTER_OFFSET(NEARMV),
-INTER_OFFSET(NEWMV)
};
-const aom_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
+const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
-PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
};
@@ -286,7 +286,7 @@
{ { 100 }, { 66 } } };
-void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
+void av1_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
unsigned int (*ct_32x32p)[2]) {
ct_32x32p[0][0] = tx_count_32x32p[TX_4X4];
ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] + tx_count_32x32p[TX_16X16] +
@@ -297,7 +297,7 @@
ct_32x32p[2][1] = tx_count_32x32p[TX_32X32];
}
-void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
+void av1_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
unsigned int (*ct_16x16p)[2]) {
ct_16x16p[0][0] = tx_count_16x16p[TX_4X4];
ct_16x16p[0][1] = tx_count_16x16p[TX_8X8] + tx_count_16x16p[TX_16X16];
@@ -305,7 +305,7 @@
ct_16x16p[1][1] = tx_count_16x16p[TX_16X16];
}
-void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
+void av1_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
unsigned int (*ct_8x8p)[2]) {
ct_8x8p[0][0] = tx_count_8x8p[TX_4X4];
ct_8x8p[0][1] = tx_count_8x8p[TX_8X8];
@@ -325,7 +325,7 @@
};
#endif
-const aom_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
+const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
-DCT_DCT, 2, -ADST_ADST, 4, -ADST_DCT, -DCT_ADST
};
@@ -341,29 +341,29 @@
};
static void init_mode_probs(FRAME_CONTEXT *fc) {
- vp10_copy(fc->uv_mode_prob, default_uv_probs);
- vp10_copy(fc->y_mode_prob, default_if_y_probs);
- vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
- vp10_copy(fc->partition_prob, default_partition_probs);
- vp10_copy(fc->intra_inter_prob, default_intra_inter_p);
- vp10_copy(fc->comp_inter_prob, default_comp_inter_p);
- vp10_copy(fc->comp_ref_prob, default_comp_ref_p);
- vp10_copy(fc->single_ref_prob, default_single_ref_p);
+ av1_copy(fc->uv_mode_prob, default_uv_probs);
+ av1_copy(fc->y_mode_prob, default_if_y_probs);
+ av1_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
+ av1_copy(fc->partition_prob, default_partition_probs);
+ av1_copy(fc->intra_inter_prob, default_intra_inter_p);
+ av1_copy(fc->comp_inter_prob, default_comp_inter_p);
+ av1_copy(fc->comp_ref_prob, default_comp_ref_p);
+ av1_copy(fc->single_ref_prob, default_single_ref_p);
fc->tx_probs = default_tx_probs;
- vp10_copy(fc->skip_probs, default_skip_probs);
- vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
+ av1_copy(fc->skip_probs, default_skip_probs);
+ av1_copy(fc->inter_mode_probs, default_inter_mode_probs);
#if CONFIG_MISC_FIXES
- vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
- vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
+ av1_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
+ av1_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
#endif
- vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
- vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
+ av1_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
+ av1_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
}
-const aom_tree_index vp10_switchable_interp_tree[TREE_SIZE(
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(
SWITCHABLE_FILTERS)] = { -EIGHTTAP, 2, -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP };
-void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
int i, j;
FRAME_CONTEXT *fc = cm->fc;
const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -384,32 +384,32 @@
pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
- aom_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
+ aom_tree_merge_probs(av1_inter_mode_tree, pre_fc->inter_mode_probs[i],
counts->inter_mode[i], fc->inter_mode_probs[i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
- aom_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
+ aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->y_mode_prob[i],
counts->y_mode[i], fc->y_mode_prob[i]);
#if !CONFIG_MISC_FIXES
for (i = 0; i < INTRA_MODES; ++i)
- aom_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+ aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
counts->uv_mode[i], fc->uv_mode_prob[i]);
for (i = 0; i < PARTITION_CONTEXTS; i++)
- aom_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+ aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
counts->partition[i], fc->partition_prob[i]);
#endif
if (cm->interp_filter == SWITCHABLE) {
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
aom_tree_merge_probs(
- vp10_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
+ av1_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
counts->switchable_interp[i], fc->switchable_interp_prob[i]);
}
}
-void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
int i;
FRAME_CONTEXT *fc = cm->fc;
const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -422,18 +422,18 @@
unsigned int branch_ct_32x32p[TX_SIZES - 1][2];
for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
- vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
+ av1_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
for (j = 0; j < TX_SIZES - 3; ++j)
fc->tx_probs.p8x8[i][j] =
mode_mv_merge_probs(pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]);
- vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i],
+ av1_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i],
branch_ct_16x16p);
for (j = 0; j < TX_SIZES - 2; ++j)
fc->tx_probs.p16x16[i][j] = mode_mv_merge_probs(
pre_fc->tx_probs.p16x16[i][j], branch_ct_16x16p[j]);
- vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i],
+ av1_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i],
branch_ct_32x32p);
for (j = 0; j < TX_SIZES - 1; ++j)
fc->tx_probs.p32x32[i][j] = mode_mv_merge_probs(
@@ -448,12 +448,12 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
int j;
for (j = 0; j < TX_TYPES; ++j)
- aom_tree_merge_probs(vp10_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
+ aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
counts->intra_ext_tx[i][j],
fc->intra_ext_tx_prob[i][j]);
}
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- aom_tree_merge_probs(vp10_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
+ aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
counts->inter_ext_tx[i], fc->inter_ext_tx_prob[i]);
}
@@ -463,19 +463,19 @@
fc->seg.pred_probs[i] =
mode_mv_merge_probs(pre_fc->seg.pred_probs[i], counts->seg.pred[i]);
- aom_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+ aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
counts->seg.tree_mispred, fc->seg.tree_probs);
} else {
- aom_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+ aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
counts->seg.tree_total, fc->seg.tree_probs);
}
for (i = 0; i < INTRA_MODES; ++i)
- aom_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+ aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
counts->uv_mode[i], fc->uv_mode_prob[i]);
for (i = 0; i < PARTITION_CONTEXTS; i++)
- aom_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+ aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
counts->partition[i], fc->partition_prob[i]);
#endif
}
@@ -493,13 +493,13 @@
lf->mode_deltas[1] = 0;
}
-void vp10_setup_past_independence(VP10_COMMON *cm) {
+void av1_setup_past_independence(AV1_COMMON *cm) {
// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
struct loopfilter *const lf = &cm->lf;
int i;
- vp10_clearall_segfeatures(&cm->seg);
+ av1_clearall_segfeatures(&cm->seg);
cm->seg.abs_delta = SEGMENT_DELTADATA;
if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
@@ -509,16 +509,16 @@
memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
// Reset the mode ref deltas for loop filter
- vp10_zero(lf->last_ref_deltas);
- vp10_zero(lf->last_mode_deltas);
+ av1_zero(lf->last_ref_deltas);
+ av1_zero(lf->last_mode_deltas);
set_default_lf_deltas(lf);
// To force update of the sharpness
lf->last_sharpness_level = -1;
- vp10_default_coef_probs(cm);
+ av1_default_coef_probs(cm);
init_mode_probs(cm->fc);
- vp10_init_mv_probs(cm);
+ av1_init_mv_probs(cm);
cm->fc->initialized = 1;
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index 40b4fc3..011d5f0 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ENTROPYMODE_H_
-#define VP10_COMMON_ENTROPYMODE_H_
+#ifndef AV1_COMMON_ENTROPYMODE_H_
+#define AV1_COMMON_ENTROPYMODE_H_
#include "av1/common/entropy.h"
#include "av1/common/entropymv.h"
@@ -28,7 +28,7 @@
#define INTER_OFFSET(mode) ((mode)-NEARESTMV)
-struct VP10Common;
+struct AV1Common;
struct tx_probs {
aom_prob p32x32[TX_SIZE_CONTEXTS][TX_SIZES - 1];
@@ -53,7 +53,7 @@
aom_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
aom_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
aom_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
- vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
+ av1_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
aom_prob
switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS -
1];
@@ -78,7 +78,7 @@
unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES];
unsigned int uv_mode[INTRA_MODES][INTRA_MODES];
unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES];
- vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
+ av1_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
unsigned int
eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
unsigned int
@@ -99,34 +99,34 @@
} FRAME_COUNTS;
extern const aom_prob
- vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+ av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
#if !CONFIG_MISC_FIXES
-extern const aom_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+extern const aom_prob av1_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
extern const aom_prob
- vp10_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+ av1_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
#endif
-extern const aom_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
-extern const aom_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
-extern const aom_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
+extern const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
+extern const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)];
+extern const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)];
extern const aom_tree_index
- vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
+ av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
-void vp10_setup_past_independence(struct VP10Common *cm);
+void av1_setup_past_independence(struct AV1Common *cm);
-void vp10_adapt_intra_frame_probs(struct VP10Common *cm);
-void vp10_adapt_inter_frame_probs(struct VP10Common *cm);
+void av1_adapt_intra_frame_probs(struct AV1Common *cm);
+void av1_adapt_inter_frame_probs(struct AV1Common *cm);
-void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
+void av1_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
unsigned int (*ct_32x32p)[2]);
-void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
+void av1_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
unsigned int (*ct_16x16p)[2]);
-void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
+void av1_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
unsigned int (*ct_8x8p)[2]);
-extern const aom_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)];
-static INLINE int vp10_ceil_log2(int n) {
+static INLINE int av1_ceil_log2(int n) {
int i = 1, p = 2;
while (p < n) {
i++;
@@ -139,4 +139,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ENTROPYMODE_H_
+#endif // AV1_COMMON_ENTROPYMODE_H_
diff --git a/av1/common/entropymv.c b/av1/common/entropymv.c
index 521d326..ab9c53b 100644
--- a/av1/common/entropymv.c
+++ b/av1/common/entropymv.c
@@ -15,12 +15,12 @@
// Integer pel reference mv threshold for use of high-precision 1/8 mv
#define COMPANDED_MVREF_THRESH 8
-const aom_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
+const aom_tree_index av1_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
-MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
};
/* clang-format off */
-const aom_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
+const aom_tree_index av1_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
-MV_CLASS_0, 2,
-MV_CLASS_1, 4,
6, 8,
@@ -34,11 +34,11 @@
};
/* clang-format on */
-const aom_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
+const aom_tree_index av1_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
-0, -1,
};
-const aom_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2, -1,
+const aom_tree_index av1_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2, -1,
4, -2, -3 };
static const nmv_context default_nmv_context = {
@@ -114,7 +114,7 @@
return c ? CLASS0_SIZE << (c + 2) : 0;
}
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset) {
const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096)
? MV_CLASS_10
: (MV_CLASS_TYPE)log_in_base_2[z >> 3];
@@ -122,7 +122,7 @@
return c;
}
-int vp10_use_mv_hp(const MV *ref) {
+int av1_use_mv_hp(const MV *ref) {
#if CONFIG_MISC_FIXES
(void)ref;
return 1;
@@ -140,7 +140,7 @@
comp_counts->sign[s] += incr;
z = (s ? -v : v) - 1; /* magnitude - 1 */
- c = vp10_get_mv_class(z, &o);
+ c = av1_get_mv_class(z, &o);
comp_counts->classes[c] += incr;
d = (o >> 3); /* int mv data */
@@ -160,9 +160,9 @@
}
}
-void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
+void av1_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
if (counts != NULL) {
- const MV_JOINT_TYPE j = vp10_get_mv_joint(mv);
+ const MV_JOINT_TYPE j = av1_get_mv_joint(mv);
++counts->joints[j];
if (mv_joint_vertical(j)) {
@@ -177,14 +177,14 @@
}
}
-void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
+void av1_adapt_mv_probs(AV1_COMMON *cm, int allow_hp) {
int i, j;
nmv_context *fc = &cm->fc->nmvc;
const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
const nmv_context_counts *counts = &cm->counts.mv;
- aom_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+ aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
fc->joints);
for (i = 0; i < 2; ++i) {
@@ -193,19 +193,19 @@
const nmv_component_counts *c = &counts->comps[i];
comp->sign = mode_mv_merge_probs(pre_comp->sign, c->sign);
- aom_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+ aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
comp->classes);
- aom_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+ aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
comp->class0);
for (j = 0; j < MV_OFFSET_BITS; ++j)
comp->bits[j] = mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
for (j = 0; j < CLASS0_SIZE; ++j)
- aom_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+ aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
c->class0_fp[j], comp->class0_fp[j]);
- aom_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+ aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
if (allow_hp) {
comp->class0_hp = mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
@@ -214,4 +214,4 @@
}
}
-void vp10_init_mv_probs(VP10_COMMON *cm) { cm->fc->nmvc = default_nmv_context; }
+void av1_init_mv_probs(AV1_COMMON *cm) { cm->fc->nmvc = default_nmv_context; }
diff --git a/av1/common/entropymv.h b/av1/common/entropymv.h
index 89a5570..54e4ffe 100644
--- a/av1/common/entropymv.h
+++ b/av1/common/entropymv.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ENTROPYMV_H_
-#define VP10_COMMON_ENTROPYMV_H_
+#ifndef AV1_COMMON_ENTROPYMV_H_
+#define AV1_COMMON_ENTROPYMV_H_
#include "./aom_config.h"
@@ -22,12 +22,12 @@
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
-void vp10_init_mv_probs(struct VP10Common *cm);
+void av1_init_mv_probs(struct AV1Common *cm);
-void vp10_adapt_mv_probs(struct VP10Common *cm, int usehp);
-int vp10_use_mv_hp(const MV *ref);
+void av1_adapt_mv_probs(struct AV1Common *cm, int usehp);
+int av1_use_mv_hp(const MV *ref);
#define MV_UPDATE_PROB 252
@@ -77,10 +77,10 @@
#define MV_UPP ((1 << MV_IN_USE_BITS) - 1)
#define MV_LOW (-(1 << MV_IN_USE_BITS))
-extern const aom_tree_index vp10_mv_joint_tree[];
-extern const aom_tree_index vp10_mv_class_tree[];
-extern const aom_tree_index vp10_mv_class0_tree[];
-extern const aom_tree_index vp10_mv_fp_tree[];
+extern const aom_tree_index av1_mv_joint_tree[];
+extern const aom_tree_index av1_mv_class_tree[];
+extern const aom_tree_index av1_mv_class0_tree[];
+extern const aom_tree_index av1_mv_fp_tree[];
typedef struct {
aom_prob sign;
@@ -98,7 +98,7 @@
nmv_component comps[2];
} nmv_context;
-static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) {
+static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {
if (mv->row == 0) {
return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
} else {
@@ -106,7 +106,7 @@
}
}
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset);
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset);
typedef struct {
unsigned int sign[2];
@@ -124,10 +124,10 @@
nmv_component_counts comps[2];
} nmv_context_counts;
-void vp10_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
+void av1_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_ENTROPYMV_H_
+#endif // AV1_COMMON_ENTROPYMV_H_
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 52c9592..a133a28 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ENUMS_H_
-#define VP10_COMMON_ENUMS_H_
+#ifndef AV1_COMMON_ENUMS_H_
+#define AV1_COMMON_ENUMS_H_
#include "./aom_config.h"
#include "aom/aom_integer.h"
@@ -143,4 +143,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ENUMS_H_
+#endif // AV1_COMMON_ENUMS_H_
diff --git a/av1/common/filter.c b/av1/common/filter.c
index 8710f80..aa6626a 100644
--- a/av1/common/filter.c
+++ b/av1/common/filter.c
@@ -64,6 +64,6 @@
{ 0, -3, 2, 41, 63, 29, -2, -2 }, { 0, -3, 1, 38, 64, 32, -1, -3 }
};
-const InterpKernel *vp10_filter_kernels[4] = {
+const InterpKernel *av1_filter_kernels[4] = {
sub_pel_filters_8, sub_pel_filters_8lp, sub_pel_filters_8s, bilinear_filters
};
diff --git a/av1/common/filter.h b/av1/common/filter.h
index 07a1bd6..6e3f547 100644
--- a/av1/common/filter.h
+++ b/av1/common/filter.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_FILTER_H_
-#define VP10_COMMON_FILTER_H_
+#ifndef AV1_COMMON_FILTER_H_
+#define AV1_COMMON_FILTER_H_
#include "./aom_config.h"
#include "aom/aom_integer.h"
@@ -33,10 +33,10 @@
typedef uint8_t INTERP_FILTER;
-extern const InterpKernel *vp10_filter_kernels[4];
+extern const InterpKernel *av1_filter_kernels[4];
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_FILTER_H_
+#endif // AV1_COMMON_FILTER_H_
diff --git a/av1/common/frame_buffers.c b/av1/common/frame_buffers.c
index c9eeb25..0ca3919 100644
--- a/av1/common/frame_buffers.c
+++ b/av1/common/frame_buffers.c
@@ -14,9 +14,9 @@
#include "av1/common/frame_buffers.h"
#include "aom_mem/aom_mem.h"
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
assert(list != NULL);
- vp10_free_internal_frame_buffers(list);
+ av1_free_internal_frame_buffers(list);
list->num_internal_frame_buffers =
VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS;
@@ -25,7 +25,7 @@
return (list->int_fb == NULL);
}
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list) {
int i;
assert(list != NULL);
@@ -38,7 +38,7 @@
list->int_fb = NULL;
}
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
aom_codec_frame_buffer_t *fb) {
int i;
InternalFrameBufferList *const int_fb_list =
@@ -73,7 +73,7 @@
return 0;
}
-int vp10_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv;
(void)cb_priv;
if (int_fb) int_fb->in_use = 0;
diff --git a/av1/common/frame_buffers.h b/av1/common/frame_buffers.h
index 51d3a0c..c062ffe 100644
--- a/av1/common/frame_buffers.h
+++ b/av1/common/frame_buffers.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_FRAME_BUFFERS_H_
-#define VP10_COMMON_FRAME_BUFFERS_H_
+#ifndef AV1_COMMON_FRAME_BUFFERS_H_
+#define AV1_COMMON_FRAME_BUFFERS_H_
#include "aom/aom_frame_buffer.h"
#include "aom/aom_integer.h"
@@ -31,24 +31,24 @@
} InternalFrameBufferList;
// Initializes |list|. Returns 0 on success.
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list);
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list);
// Free any data allocated to the frame buffers.
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list);
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list);
// Callback used by libaom to request an external frame buffer. |cb_priv|
// Callback private data, which points to an InternalFrameBufferList.
// |min_size| is the minimum size in bytes needed to decode the next frame.
// |fb| pointer to the frame buffer.
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
aom_codec_frame_buffer_t *fb);
// Callback used by libaom when there are no references to the frame buffer.
// |cb_priv| is not used. |fb| pointer to the frame buffer.
-int vp10_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_FRAME_BUFFERS_H_
+#endif // AV1_COMMON_FRAME_BUFFERS_H_
diff --git a/av1/common/idct.c b/av1/common/idct.c
index b9679d8..37eb5a9 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -18,7 +18,7 @@
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
-void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
const transform_2d IHT_4[] = {
{ idct4_c, idct4_c }, // DCT_DCT = 0
@@ -57,7 +57,7 @@
{ iadst8_c, iadst8_c } // ADST_ADST = 3
};
-void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
int i, j;
tran_low_t out[8 * 8];
@@ -90,7 +90,7 @@
{ iadst16_c, iadst16_c } // ADST_ADST = 3
};
-void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
int i, j;
tran_low_t out[16 * 16];
@@ -117,7 +117,7 @@
}
// idct
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
if (eob > 1)
aom_idct4x4_16_add(input, dest, stride);
@@ -125,7 +125,7 @@
aom_idct4x4_1_add(input, dest, stride);
}
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
if (eob > 1)
aom_iwht4x4_16_add(input, dest, stride);
@@ -133,14 +133,14 @@
aom_iwht4x4_1_add(input, dest, stride);
}
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
// If dc is 1, then input[0] is the reconstructed value, do not need
// dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
// The calculation can be simplified if there are not many non-zero dct
// coefficients. Use eobs to decide what to do.
- // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+ // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
// Combine that with code here.
if (eob == 1)
// DC only DCT coefficient
@@ -151,7 +151,7 @@
aom_idct8x8_64_add(input, dest, stride);
}
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
/* The calculation can be simplified if there are not many non-zero dct
* coefficients. Use eobs to separate different cases. */
@@ -163,7 +163,7 @@
aom_idct16x16_256_add(input, dest, stride);
}
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob) {
if (eob == 1)
aom_idct32x32_1_add(input, dest, stride);
@@ -174,48 +174,48 @@
aom_idct32x32_1024_add(input, dest, stride);
}
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type, int lossless) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_iwht4x4_add(input, dest, stride, eob);
+ av1_iwht4x4_add(input, dest, stride, eob);
} else {
switch (tx_type) {
- case DCT_DCT: vp10_idct4x4_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct4x4_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+ case ADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
default: assert(0); break;
}
}
}
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_idct8x8_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct8x8_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+ case ADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
default: assert(0); break;
}
}
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_idct16x16_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct16x16_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST: vp10_iht16x16_256_add(input, dest, stride, tx_type); break;
+ case ADST_ADST: av1_iht16x16_256_add(input, dest, stride, tx_type); break;
default: assert(0); break;
}
}
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_idct32x32_add(input, dest, stride, eob); break;
+ case DCT_DCT: av1_idct32x32_add(input, dest, stride, eob); break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST: assert(0); break;
@@ -223,8 +223,8 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int tx_type, int bd) {
const highbd_transform_2d IHT_4[] = {
{ aom_highbd_idct4_c, aom_highbd_idct4_c }, // DCT_DCT = 0
@@ -264,7 +264,7 @@
{ aom_highbd_iadst8_c, aom_highbd_iadst8_c } // ADST_ADST = 3
};
-void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int tx_type, int bd) {
int i, j;
tran_low_t out[8 * 8];
@@ -298,7 +298,7 @@
{ aom_highbd_iadst16_c, aom_highbd_iadst16_c } // ADST_ADST = 3
};
-void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int tx_type, int bd) {
int i, j;
tran_low_t out[16 * 16];
@@ -326,7 +326,7 @@
}
// idct
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd) {
if (eob > 1)
aom_highbd_idct4x4_16_add(input, dest, stride, bd);
@@ -334,7 +334,7 @@
aom_highbd_idct4x4_1_add(input, dest, stride, bd);
}
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd) {
if (eob > 1)
aom_highbd_iwht4x4_16_add(input, dest, stride, bd);
@@ -342,14 +342,14 @@
aom_highbd_iwht4x4_1_add(input, dest, stride, bd);
}
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd) {
// If dc is 1, then input[0] is the reconstructed value, do not need
// dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
// The calculation can be simplified if there are not many non-zero dct
// coefficients. Use eobs to decide what to do.
- // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+ // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
// Combine that with code here.
// DC only DCT coefficient
if (eob == 1) {
@@ -361,7 +361,7 @@
}
}
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd) {
// The calculation can be simplified if there are not many non-zero dct
// coefficients. Use eobs to separate different cases.
@@ -375,7 +375,7 @@
}
}
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd) {
// Non-zero coeff only in upper-left 8x8
if (eob == 1) {
@@ -387,63 +387,63 @@
}
}
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd, TX_TYPE tx_type,
int lossless) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_highbd_iwht4x4_add(input, dest, stride, eob, bd);
+ av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);
} else {
switch (tx_type) {
case DCT_DCT:
- vp10_highbd_idct4x4_add(input, dest, stride, eob, bd);
+ av1_highbd_idct4x4_add(input, dest, stride, eob, bd);
break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
+ av1_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
break;
default: assert(0); break;
}
}
}
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT: vp10_highbd_idct8x8_add(input, dest, stride, eob, bd); break;
+ case DCT_DCT: av1_highbd_idct8x8_add(input, dest, stride, eob, bd); break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_highbd_iht8x8_64_add(input, dest, stride, tx_type, bd);
+ av1_highbd_iht8x8_64_add(input, dest, stride, tx_type, bd);
break;
default: assert(0); break;
}
}
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
switch (tx_type) {
case DCT_DCT:
- vp10_highbd_idct16x16_add(input, dest, stride, eob, bd);
+ av1_highbd_idct16x16_add(input, dest, stride, eob, bd);
break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_highbd_iht16x16_256_add(input, dest, stride, tx_type, bd);
+ av1_highbd_iht16x16_256_add(input, dest, stride, tx_type, bd);
break;
default: assert(0); break;
}
}
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type) {
switch (tx_type) {
case DCT_DCT:
- vp10_highbd_idct32x32_add(input, dest, stride, eob, bd);
+ av1_highbd_idct32x32_add(input, dest, stride, eob, bd);
break;
case ADST_DCT:
case DCT_ADST:
@@ -451,4 +451,4 @@
default: assert(0); break;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/idct.h b/av1/common/idct.h
index 2d554ab..3b680c6 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_IDCT_H_
-#define VP10_COMMON_IDCT_H_
+#ifndef AV1_COMMON_IDCT_H_
+#define AV1_COMMON_IDCT_H_
#include <assert.h>
@@ -31,53 +31,53 @@
transform_1d cols, rows; // vertical and horizontal
} transform_2d;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
typedef struct {
highbd_transform_1d cols, rows; // vertical and horizontal
} highbd_transform_2d;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob);
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob);
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type, int lossless);
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
int eob, TX_TYPE tx_type);
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd);
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd);
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd);
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd);
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd);
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd, TX_TYPE tx_type,
int lossless);
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
int stride, int eob, int bd,
TX_TYPE tx_type);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_IDCT_H_
+#endif // AV1_COMMON_IDCT_H_
diff --git a/av1/common/loopfilter.c b/av1/common/loopfilter.c
index 2cb16f5..b0b6596 100644
--- a/av1/common/loopfilter.c
+++ b/av1/common/loopfilter.c
@@ -237,7 +237,7 @@
->lvl[mbmi->segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
}
-void vp10_loop_filter_init(VP10_COMMON *cm) {
+void av1_loop_filter_init(AV1_COMMON *cm) {
loop_filter_info_n *lfi = &cm->lf_info;
struct loopfilter *lf = &cm->lf;
int lvl;
@@ -251,7 +251,7 @@
memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
}
-void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
+void av1_loop_filter_frame_init(AV1_COMMON *cm, int default_filt_lvl) {
int seg_id;
// n_shift is the multiplier for lf_deltas
// the multiplier is 1 for when filter_lvl is between 0 and 31;
@@ -393,7 +393,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_filter_selectively_vert_row2(
int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
unsigned int mask_8x8_l, unsigned int mask_4x4_l,
@@ -489,7 +489,7 @@
mask_4x4_int_1 >>= 1;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static void filter_selectively_horiz(
uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
@@ -584,7 +584,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_filter_selectively_horiz(
uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -683,7 +683,7 @@
mask_4x4_int >>= count;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// This function ors into the current lfm structure, where to do loop
// filters for the specific mi we are looking at. It uses information
@@ -825,7 +825,7 @@
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
-void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
+void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
MODE_INFO **mi, const int mode_info_stride,
LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
@@ -860,7 +860,7 @@
(mi_col + MI_BLOCK_SIZE > cm->mi_cols ? cm->mi_cols - mi_col
: MI_BLOCK_SIZE);
- vp10_zero(*lfm);
+ av1_zero(*lfm);
assert(mip[0] != NULL);
// TODO(jimbankoski): Try moving most of the following code into decode
@@ -1115,7 +1115,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_filter_selectively_vert(
uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -1149,9 +1149,9 @@
mask_4x4_int >>= 1;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_filter_block_plane_non420(VP10_COMMON *cm,
+void av1_filter_block_plane_non420(AV1_COMMON *cm,
struct macroblockd_plane *plane,
MODE_INFO **mi_8x8, int mi_row,
int mi_col) {
@@ -1253,7 +1253,7 @@
// Disable filtering on the leftmost column
border_mask = ~(mi_col == 0);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_vert(
CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1270,7 +1270,7 @@
filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
mask_8x8_c & border_mask, mask_4x4_c & border_mask,
mask_4x4_int[r], &cm->lf_info, &lfl[r << 3]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 8 * dst->stride;
mi_8x8 += row_step_stride;
}
@@ -1294,7 +1294,7 @@
mask_8x8_r = mask_8x8[r];
mask_4x4_r = mask_4x4[r];
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1309,12 +1309,12 @@
filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
&lfl[r << 3]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 8 * dst->stride;
}
}
-void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
+void av1_filter_block_plane_ss00(AV1_COMMON *const cm,
struct macroblockd_plane *const plane,
int mi_row, LOOP_FILTER_MASK *lfm) {
struct buf_2d *const dst = &plane->dst;
@@ -1335,7 +1335,7 @@
unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;
// Disable filtering on the leftmost column.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_vert_row2(
plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1350,7 +1350,7 @@
filter_selectively_vert_row2(
plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 16 * dst->stride;
mask_16x16 >>= 16;
mask_8x8 >>= 16;
@@ -1380,7 +1380,7 @@
mask_4x4_r = mask_4x4 & 0xff;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_horiz(
CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1395,7 +1395,7 @@
filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
&lfm->lfl_y[r << 3]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 8 * dst->stride;
mask_16x16 >>= 8;
@@ -1405,7 +1405,7 @@
}
}
-void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
+void av1_filter_block_plane_ss11(AV1_COMMON *const cm,
struct macroblockd_plane *const plane,
int mi_row, LOOP_FILTER_MASK *lfm) {
struct buf_2d *const dst = &plane->dst;
@@ -1439,7 +1439,7 @@
unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;
// Disable filtering on the leftmost column.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_vert_row2(
plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1455,7 +1455,7 @@
filter_selectively_vert_row2(
plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_uv[r << 1]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 16 * dst->stride;
mask_16x16 >>= 8;
@@ -1494,7 +1494,7 @@
mask_4x4_r = mask_4x4 & 0xf;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1509,7 +1509,7 @@
filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
&lfm->lfl_uv[r << 1]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst->buf += 8 * dst->stride;
mask_16x16 >>= 4;
@@ -1519,7 +1519,7 @@
}
}
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only) {
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
@@ -1542,22 +1542,22 @@
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
int plane;
- vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+ av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
// TODO(JBB): Make setup_mask work for non 420.
- vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+ av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
- vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
for (plane = 1; plane < num_planes; ++plane) {
switch (path) {
case LF_PATH_420:
- vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_444:
- vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_SLOW:
- vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+ av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
mi_row, mi_col);
break;
}
@@ -1566,7 +1566,7 @@
}
}
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
MACROBLOCKD *xd, int frame_filter_level, int y_only,
int partial_frame) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
@@ -1579,13 +1579,13 @@
mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
}
end_mi_row = start_mi_row + mi_rows_to_filter;
- vp10_loop_filter_frame_init(cm, frame_filter_level);
- vp10_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
+ av1_loop_filter_frame_init(cm, frame_filter_level);
+ av1_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
}
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
- struct VP10Common *cm,
+ struct AV1Common *cm,
const struct macroblockd_plane planes[MAX_MB_PLANE]) {
lf_data->frame_buffer = frame_buffer;
lf_data->cm = cm;
@@ -1595,9 +1595,9 @@
memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
}
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
(void)unused;
- vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+ av1_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
lf_data->start, lf_data->stop, lf_data->y_only);
return 1;
}
diff --git a/av1/common/loopfilter.h b/av1/common/loopfilter.h
index 73273e4..a8992f4 100644
--- a/av1/common/loopfilter.h
+++ b/av1/common/loopfilter.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_LOOPFILTER_H_
-#define VP10_COMMON_LOOPFILTER_H_
+#ifndef AV1_COMMON_LOOPFILTER_H_
+#define AV1_COMMON_LOOPFILTER_H_
#include "aom_ports/mem.h"
#include "./aom_config.h"
@@ -92,49 +92,49 @@
} LOOP_FILTER_MASK;
/* assorted loopfilter functions which get used elsewhere */
-struct VP10Common;
+struct AV1Common;
struct macroblockd;
-struct VP10LfSyncData;
+struct AV1LfSyncData;
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
-void vp10_setup_mask(struct VP10Common *const cm, const int mi_row,
+void av1_setup_mask(struct AV1Common *const cm, const int mi_row,
const int mi_col, MODE_INFO **mi_8x8,
const int mode_info_stride, LOOP_FILTER_MASK *lfm);
-void vp10_filter_block_plane_ss00(struct VP10Common *const cm,
+void av1_filter_block_plane_ss00(struct AV1Common *const cm,
struct macroblockd_plane *const plane,
int mi_row, LOOP_FILTER_MASK *lfm);
-void vp10_filter_block_plane_ss11(struct VP10Common *const cm,
+void av1_filter_block_plane_ss11(struct AV1Common *const cm,
struct macroblockd_plane *const plane,
int mi_row, LOOP_FILTER_MASK *lfm);
-void vp10_filter_block_plane_non420(struct VP10Common *cm,
+void av1_filter_block_plane_non420(struct AV1Common *cm,
struct macroblockd_plane *plane,
MODE_INFO **mi_8x8, int mi_row, int mi_col);
-void vp10_loop_filter_init(struct VP10Common *cm);
+void av1_loop_filter_init(struct AV1Common *cm);
// Update the loop filter for the current frame.
-// This should be called before vp10_loop_filter_rows(),
-// vp10_loop_filter_frame()
+// This should be called before av1_loop_filter_rows(),
+// av1_loop_filter_frame()
// calls this function directly.
-void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl);
+void av1_loop_filter_frame_init(struct AV1Common *cm, int default_filt_lvl);
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
struct macroblockd *mbd, int filter_level,
int y_only, int partial_frame);
// Apply the loop filter to [start, stop) macro block rows in frame_buffer.
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
- struct VP10Common *cm,
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
+ struct AV1Common *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only);
typedef struct LoopFilterWorkerData {
YV12_BUFFER_CONFIG *frame_buffer;
- struct VP10Common *cm;
+ struct AV1Common *cm;
struct macroblockd_plane planes[MAX_MB_PLANE];
int start;
@@ -142,14 +142,14 @@
int y_only;
} LFWorkerData;
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
- struct VP10Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
+ struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
// Operates on the rows described by 'lf_data'.
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_LOOPFILTER_H_
+#endif // AV1_COMMON_LOOPFILTER_H_
diff --git a/av1/common/mips/dspr2/itrans16_dspr2.c b/av1/common/mips/dspr2/itrans16_dspr2.c
index e9db822..aaf3972 100644
--- a/av1/common/mips/dspr2/itrans16_dspr2.c
+++ b/av1/common/mips/dspr2/itrans16_dspr2.c
@@ -22,7 +22,7 @@
#include "aom_ports/mem.h"
#if HAVE_DSPR2
-void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
+void av1_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
int tx_type) {
int i, j;
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
@@ -91,7 +91,7 @@
dest[j * pitch + i]);
}
} break;
- default: printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
+ default: printf("av1_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
}
}
#endif // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans4_dspr2.c b/av1/common/mips/dspr2/itrans4_dspr2.c
index ee8f566..a49db1f 100644
--- a/av1/common/mips/dspr2/itrans4_dspr2.c
+++ b/av1/common/mips/dspr2/itrans4_dspr2.c
@@ -22,7 +22,7 @@
#include "aom_ports/mem.h"
#if HAVE_DSPR2
-void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+void av1_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride, int tx_type) {
int i, j;
DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
@@ -85,7 +85,7 @@
ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
}
break;
- default: printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
+ default: printf("av1_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
}
}
#endif // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans8_dspr2.c b/av1/common/mips/dspr2/itrans8_dspr2.c
index 0c5a708..1828bbc 100644
--- a/av1/common/mips/dspr2/itrans8_dspr2.c
+++ b/av1/common/mips/dspr2/itrans8_dspr2.c
@@ -21,7 +21,7 @@
#include "aom_ports/mem.h"
#if HAVE_DSPR2
-void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+void av1_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride, int tx_type) {
int i, j;
DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
@@ -79,7 +79,7 @@
ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
}
break;
- default: printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
+ default: printf("av1_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
}
}
#endif // #if HAVE_DSPR2
diff --git a/av1/common/mips/msa/idct16x16_msa.c b/av1/common/mips/msa/idct16x16_msa.c
index 9b75b31..54f3841 100644
--- a/av1/common/mips/msa/idct16x16_msa.c
+++ b/av1/common/mips/msa/idct16x16_msa.c
@@ -14,7 +14,7 @@
#include "av1/common/enums.h"
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+void av1_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride, int32_t tx_type) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
diff --git a/av1/common/mips/msa/idct4x4_msa.c b/av1/common/mips/msa/idct4x4_msa.c
index b6269bb..8ad1ba8 100644
--- a/av1/common/mips/msa/idct4x4_msa.c
+++ b/av1/common/mips/msa/idct4x4_msa.c
@@ -14,7 +14,7 @@
#include "av1/common/enums.h"
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+void av1_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride, int32_t tx_type) {
v8i16 in0, in1, in2, in3;
diff --git a/av1/common/mips/msa/idct8x8_msa.c b/av1/common/mips/msa/idct8x8_msa.c
index 110ce71..8caf8fc 100644
--- a/av1/common/mips/msa/idct8x8_msa.c
+++ b/av1/common/mips/msa/idct8x8_msa.c
@@ -14,7 +14,7 @@
#include "av1/common/enums.h"
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+void av1_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride, int32_t tx_type) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
diff --git a/av1/common/mv.h b/av1/common/mv.h
index 06261af..445d5d4 100644
--- a/av1/common/mv.h
+++ b/av1/common/mv.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_MV_H_
-#define VP10_COMMON_MV_H_
+#ifndef AV1_COMMON_MV_H_
+#define AV1_COMMON_MV_H_
#include "aom/aom_integer.h"
@@ -53,4 +53,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_MV_H_
+#endif // AV1_COMMON_MV_H_
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 7ce74c6..752d03e 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -12,7 +12,7 @@
// This function searches the neighbourhood of a given MB/SB
// to try and find candidate reference vectors.
-static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list, int block, int mi_row,
int mi_col, find_mv_refs_sync sync,
@@ -161,7 +161,7 @@
#endif
}
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list, int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data,
@@ -171,14 +171,14 @@
}
static void lower_mv_precision(MV *mv, int allow_hp) {
- const int use_hp = allow_hp && vp10_use_mv_hp(mv);
+ const int use_hp = allow_hp && av1_use_mv_hp(mv);
if (!use_hp) {
if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
}
}
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
int_mv *near_mv) {
int i;
// Make sure all the candidates are properly clamped etc
@@ -189,7 +189,7 @@
*near_mv = mvlist[1];
}
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
int ref, int mi_row, int mi_col,
int_mv *nearest_mv, int_mv *near_mv,
uint8_t *mode_context) {
diff --git a/av1/common/mvref_common.h b/av1/common/mvref_common.h
index 66cc48a..014a53d 100644
--- a/av1/common/mvref_common.h
+++ b/av1/common/mvref_common.h
@@ -8,8 +8,8 @@
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_MVREF_COMMON_H_
-#define VP10_COMMON_MVREF_COMMON_H_
+#ifndef AV1_COMMON_MVREF_COMMON_H_
+#define AV1_COMMON_MVREF_COMMON_H_
#include "av1/common/onyxc_int.h"
#include "av1/common/blockd.h"
@@ -303,7 +303,7 @@
}
typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list, int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data,
@@ -312,10 +312,10 @@
// check a list of motion vectors by sad score using a number rows of pixels
// above and a number cols of pixels in the left to select the one with best
// score to use as ref motion vector
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
int_mv *near_mv);
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
int ref, int mi_row, int mi_col,
int_mv *nearest_mv, int_mv *near_mv,
uint8_t *mode_context);
@@ -324,4 +324,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_MVREF_COMMON_H_
+#endif // AV1_COMMON_MVREF_COMMON_H_
diff --git a/av1/common/odintrin.h b/av1/common/odintrin.h
index c3a4033..47f17bf 100644
--- a/av1/common/odintrin.h
+++ b/av1/common/odintrin.h
@@ -8,8 +8,8 @@
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ODINTRIN_H_
-#define VP10_COMMON_ODINTRIN_H_
+#ifndef AV1_COMMON_ODINTRIN_H_
+#define AV1_COMMON_ODINTRIN_H_
#include "av1/common/enums.h"
#include "aom/aom_integer.h"
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 0b26d16..83fead3 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_ONYXC_INT_H_
-#define VP10_COMMON_ONYXC_INT_H_
+#ifndef AV1_COMMON_ONYXC_INT_H_
+#define AV1_COMMON_ONYXC_INT_H_
#include "./aom_config.h"
#include "aom/internal/aom_codec_internal.h"
@@ -122,7 +122,7 @@
InternalFrameBufferList int_frame_buffers;
} BufferPool;
-typedef struct VP10Common {
+typedef struct AV1Common {
struct aom_internal_error_info error;
aom_color_space_t color_space;
int color_range;
@@ -139,7 +139,7 @@
int subsampling_x;
int subsampling_y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth; // Marks if we need to use 16bit frame buffers.
#endif
@@ -229,9 +229,9 @@
MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
// Separate mi functions between encoder and decoder.
- int (*alloc_mi)(struct VP10Common *cm, int mi_size);
- void (*free_mi)(struct VP10Common *cm);
- void (*setup_mi)(struct VP10Common *cm);
+ int (*alloc_mi)(struct AV1Common *cm, int mi_size);
+ void (*free_mi)(struct AV1Common *cm);
+ void (*setup_mi)(struct AV1Common *cm);
// Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible
// area will be NULL.
@@ -316,7 +316,7 @@
#if CONFIG_DERING
int dering_level;
#endif
-} VP10_COMMON;
+} AV1_COMMON;
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
@@ -336,18 +336,18 @@
#endif
}
-static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) {
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
if (index < 0 || index >= REF_FRAMES) return NULL;
if (cm->ref_frame_map[index] < 0) return NULL;
assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
}
-static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP10_COMMON *cm) {
+static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(AV1_COMMON *cm) {
return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
-static INLINE int get_free_fb(VP10_COMMON *cm) {
+static INLINE int get_free_fb(AV1_COMMON *cm) {
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
@@ -381,11 +381,11 @@
return ALIGN_POWER_OF_TWO(n_mis, MI_BLOCK_SIZE_LOG2);
}
-static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) {
+static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
return cm->frame_type == KEY_FRAME || cm->intra_only;
}
-static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
tran_low_t *dqcoeff) {
int i;
@@ -461,13 +461,13 @@
}
}
-static INLINE const aom_prob *get_y_mode_probs(const VP10_COMMON *cm,
+static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
- const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block);
- const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block);
+ const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
+ const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
return cm->kf_y_prob[above][left];
}
@@ -504,4 +504,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_ONYXC_INT_H_
+#endif // AV1_COMMON_ONYXC_INT_H_
diff --git a/av1/common/pred_common.c b/av1/common/pred_common.c
index 590324b..508dac5 100644
--- a/av1/common/pred_common.c
+++ b/av1/common/pred_common.c
@@ -13,7 +13,7 @@
#include "av1/common/seg_common.h"
// Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -44,7 +44,7 @@
// 1 - intra/inter, inter/intra
// 2 - intra/--, --/intra
// 3 - intra/intra
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
+int av1_get_intra_inter_context(const MACROBLOCKD *xd) {
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int has_above = xd->up_available;
@@ -61,7 +61,7 @@
}
}
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
@@ -104,7 +104,7 @@
}
// Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
@@ -186,7 +186,7 @@
return pred_context;
}
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -252,7 +252,7 @@
return pred_context;
}
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
diff --git a/av1/common/pred_common.h b/av1/common/pred_common.h
index 56691ee..a07e3f2 100644
--- a/av1/common/pred_common.h
+++ b/av1/common/pred_common.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_PRED_COMMON_H_
-#define VP10_COMMON_PRED_COMMON_H_
+#ifndef AV1_COMMON_PRED_COMMON_H_
+#define AV1_COMMON_PRED_COMMON_H_
#include "av1/common/blockd.h"
#include "av1/common/onyxc_int.h"
@@ -20,7 +20,7 @@
extern "C" {
#endif
-static INLINE int get_segment_id(const VP10_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *cm,
const uint8_t *segment_ids, BLOCK_SIZE bsize,
int mi_row, int mi_col) {
const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -39,7 +39,7 @@
return segment_id;
}
-static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
+static INLINE int av1_get_pred_context_seg_id(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_sip =
@@ -49,12 +49,12 @@
return above_sip + left_sip;
}
-static INLINE aom_prob vp10_get_pred_prob_seg_id(
+static INLINE aom_prob av1_get_pred_prob_seg_id(
const struct segmentation_probs *segp, const MACROBLOCKD *xd) {
- return segp->pred_probs[vp10_get_pred_context_seg_id(xd)];
+ return segp->pred_probs[av1_get_pred_context_seg_id(xd)];
}
-static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
+static INLINE int av1_get_skip_context(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
@@ -62,49 +62,49 @@
return above_skip + left_skip;
}
-static INLINE aom_prob vp10_get_skip_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_skip_prob(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- return cm->fc->skip_probs[vp10_get_skip_context(xd)];
+ return cm->fc->skip_probs[av1_get_skip_context(xd)];
}
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd);
+int av1_get_intra_inter_context(const MACROBLOCKD *xd);
-static INLINE aom_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_intra_inter_prob(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)];
+ return cm->fc->intra_inter_prob[av1_get_intra_inter_context(xd)];
}
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
const MACROBLOCKD *xd);
-static INLINE aom_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_reference_mode_prob(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)];
+ return cm->fc->comp_inter_prob[av1_get_reference_mode_context(cm, xd)];
}
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
const MACROBLOCKD *xd);
-static INLINE aom_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd);
+ const int pred_context = av1_get_pred_context_comp_ref_p(cm, xd);
return cm->fc->comp_ref_prob[pred_context];
}
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
-static INLINE aom_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_single_ref_p1(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0];
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p1(xd)][0];
}
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
-static INLINE aom_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_single_ref_p2(const AV1_COMMON *cm,
const MACROBLOCKD *xd) {
- return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1];
+ return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p2(xd)][1];
}
// Returns a context number for the given MB prediction signal
@@ -158,4 +158,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_PRED_COMMON_H_
+#endif // AV1_COMMON_PRED_COMMON_H_
diff --git a/av1/common/quant_common.c b/av1/common/quant_common.c
index 7859052..2a5a946 100644
--- a/av1/common/quant_common.c
+++ b/av1/common/quant_common.c
@@ -41,7 +41,7 @@
1184, 1232, 1282, 1336,
};
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const int16_t dc_qlookup_10[QINDEX_RANGE] = {
4, 9, 10, 13, 15, 17, 20, 22, 25, 28, 31, 34, 37,
40, 43, 47, 50, 53, 57, 60, 64, 68, 71, 75, 78, 82,
@@ -116,7 +116,7 @@
1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
};
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const int16_t ac_qlookup_10[QINDEX_RANGE] = {
4, 9, 11, 13, 16, 18, 21, 24, 27, 30, 33, 37, 40,
44, 48, 51, 55, 59, 63, 67, 71, 75, 79, 83, 88, 92,
@@ -168,8 +168,8 @@
};
#endif
-int16_t vp10_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
-#if CONFIG_VPX_HIGHBITDEPTH
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
@@ -184,8 +184,8 @@
#endif
}
-int16_t vp10_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
-#if CONFIG_VPX_HIGHBITDEPTH
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
@@ -200,7 +200,7 @@
#endif
}
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
int base_qindex) {
if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
@@ -213,11 +213,11 @@
}
#if CONFIG_AOM_QM
-qm_val_t* aom_iqmatrix(VP10_COMMON* cm, int qmlevel, int is_chroma,
+qm_val_t* aom_iqmatrix(AV1_COMMON* cm, int qmlevel, int is_chroma,
int log2sizem2, int is_intra) {
return &cm->giqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
}
-qm_val_t* aom_qmatrix(VP10_COMMON* cm, int qmlevel, int is_chroma,
+qm_val_t* aom_qmatrix(AV1_COMMON* cm, int qmlevel, int is_chroma,
int log2sizem2, int is_intra) {
return &cm->gqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
}
@@ -227,7 +227,7 @@
static uint16_t
wt_matrix_ref[NUM_QM_LEVELS][2][2][4 * 4 + 8 * 8 + 16 * 16 + 32 * 32];
-void aom_qm_init(VP10_COMMON* cm) {
+void aom_qm_init(AV1_COMMON* cm) {
int q, c, f, t, size;
int current;
for (q = 0; q < NUM_QM_LEVELS; ++q) {
diff --git a/av1/common/quant_common.h b/av1/common/quant_common.h
index c806c7a..e6bb62e 100644
--- a/av1/common/quant_common.h
+++ b/av1/common/quant_common.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_QUANT_COMMON_H_
-#define VP10_COMMON_QUANT_COMMON_H_
+#ifndef AV1_COMMON_QUANT_COMMON_H_
+#define AV1_COMMON_QUANT_COMMON_H_
#include "aom/aom_codec.h"
#include "av1/common/seg_common.h"
@@ -35,12 +35,12 @@
#define DEFAULT_QM_LAST (NUM_QM_LEVELS - 1)
#endif
-struct VP10Common;
+struct AV1Common;
-int16_t vp10_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
-int16_t vp10_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
int base_qindex);
#if CONFIG_AOM_QM
// Reduce the large number of quantizers to a smaller number of levels for which
@@ -50,10 +50,10 @@
qmlevel = VPXMIN(qmlevel + first, NUM_QM_LEVELS - 1);
return qmlevel;
}
-void aom_qm_init(struct VP10Common *cm);
-qm_val_t *aom_iqmatrix(struct VP10Common *cm, int qindex, int comp,
+void aom_qm_init(struct AV1Common *cm);
+qm_val_t *aom_iqmatrix(struct AV1Common *cm, int qindex, int comp,
int log2sizem2, int is_intra);
-qm_val_t *aom_qmatrix(struct VP10Common *cm, int qindex, int comp,
+qm_val_t *aom_qmatrix(struct AV1Common *cm, int qindex, int comp,
int log2sizem2, int is_intra);
#endif
@@ -61,4 +61,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_QUANT_COMMON_H_
+#endif // AV1_COMMON_QUANT_COMMON_H_
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index ee43ffe..4a9a98a 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -20,8 +20,8 @@
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
const InterpKernel *kernel, enum mv_precision precision, int x, int y,
@@ -29,7 +29,7 @@
const int is_q4 = precision == MV_PRECISION_Q4;
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+ MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
const int subpel_x = mv.col & SUBPEL_MASK;
const int subpel_y = mv.row & SUBPEL_MASK;
@@ -38,9 +38,9 @@
high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf,
w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride, const MV *src_mv,
const struct scale_factors *sf, int w, int h,
int ref, const InterpKernel *kernel,
@@ -48,7 +48,7 @@
const int is_q4 = precision == MV_PRECISION_Q4;
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+ MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
const int subpel_x = mv.col & SUBPEL_MASK;
const int subpel_y = mv.row & SUBPEL_MASK;
@@ -64,7 +64,7 @@
struct macroblockd_plane *const pd = &xd->plane[plane];
const MODE_INFO *mi = xd->mi[0];
const int is_compound = has_second_ref(&mi->mbmi);
- const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
+ const InterpKernel *kernel = av1_filter_kernels[mi->mbmi.interp_filter];
int ref;
for (ref = 0; ref < 1 + is_compound; ++ref) {
@@ -87,11 +87,11 @@
uint8_t *pre;
MV32 scaled_mv;
int xs, ys, subpel_x, subpel_y;
- const int is_scaled = vp10_is_scaled(sf);
+ const int is_scaled = av1_is_scaled(sf);
if (is_scaled) {
pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
- scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
xs = sf->x_step_q4;
ys = sf->y_step_q4;
} else {
@@ -105,7 +105,7 @@
pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
(scaled_mv.col >> SUBPEL_BITS);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
@@ -116,11 +116,11 @@
#else
inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
subpel_y, sf, w, h, ref, kernel, xs, ys);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
int ir, int ic, int mi_row, int mi_col) {
struct macroblockd_plane *const pd = &xd->plane[plane];
MODE_INFO *const mi = xd->mi[0];
@@ -131,32 +131,32 @@
uint8_t *const dst = &pd->dst.buf[(ir * pd->dst.stride + ic) << 2];
int ref;
const int is_compound = has_second_ref(&mi->mbmi);
- const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
+ const InterpKernel *kernel = av1_filter_kernels[mi->mbmi.interp_filter];
for (ref = 0; ref < 1 + is_compound; ++ref) {
const uint8_t *pre =
&pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_build_inter_predictor(
+ av1_highbd_build_inter_predictor(
pre, pd->pre[ref].stride, dst, pd->dst.stride,
&mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
mi_row * MI_SIZE + 4 * ir, xd->bd);
} else {
- vp10_build_inter_predictor(
+ av1_build_inter_predictor(
pre, pd->pre[ref].stride, dst, pd->dst.stride,
&mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
mi_row * MI_SIZE + 4 * ir);
}
#else
- vp10_build_inter_predictor(
+ av1_build_inter_predictor(
pre, pd->pre[ref].stride, dst, pd->dst.stride,
&mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
mi_row * MI_SIZE + 4 * ir);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -193,29 +193,29 @@
}
}
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
}
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize, int plane) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
}
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
MAX_MB_PLANE - 1);
}
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
MAX_MB_PLANE - 1);
}
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src, int mi_row,
int mi_col) {
uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
@@ -231,7 +231,7 @@
}
}
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
const YV12_BUFFER_CONFIG *src, int mi_row,
int mi_col, const struct scale_factors *sf) {
if (src != NULL) {
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 6996ea7..183ec30 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_RECONINTER_H_
-#define VP10_COMMON_RECONINTER_H_
+#ifndef AV1_COMMON_RECONINTER_H_
+#define AV1_COMMON_RECONINTER_H_
#include "av1/common/filter.h"
#include "av1/common/onyxc_int.h"
@@ -32,7 +32,7 @@
ys, w, h);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void high_inter_predictor(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
const int subpel_x, const int subpel_y, const struct scale_factors *sf,
@@ -41,7 +41,7 @@
src, src_stride, dst, dst_stride, kernel[subpel_x], xs, kernel[subpel_y],
ys, w, h, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
@@ -113,29 +113,29 @@
int bh, int x, int y, int w, int h, int mi_x,
int mi_y);
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
int ir, int ic, int mi_row, int mi_col);
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize, int plane);
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride, const MV *mv_q3,
const struct scale_factors *sf, int w, int h,
int do_avg, const InterpKernel *kernel,
enum mv_precision precision, int x, int y);
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
const InterpKernel *kernel, enum mv_precision precision, int x, int y,
@@ -159,11 +159,11 @@
dst->stride = stride;
}
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src, int mi_row,
int mi_col);
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
const YV12_BUFFER_CONFIG *src, int mi_row,
int mi_col, const struct scale_factors *sf);
@@ -171,4 +171,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_RECONINTER_H_
+#endif // AV1_COMMON_RECONINTER_H_
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index bebfb79..d6e8605 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -12,9 +12,9 @@
#include "./aom_config.h"
#include "./aom_dsp_rtcd.h"
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#include "aom_dsp/aom_dsp_common.h"
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/aom_once.h"
@@ -100,7 +100,7 @@
orders_32x64, orders_64x32, orders_64x64,
};
-static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
+static int av1_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
int right_available, TX_SIZE txsz, int y, int x,
int ss_x) {
if (y == 0) {
@@ -133,7 +133,7 @@
}
}
-static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
+static int av1_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
int bottom_available, TX_SIZE txsz, int y, int x,
int ss_y) {
if (x == 0) {
@@ -171,15 +171,15 @@
static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
static intra_pred_fn dc_pred[2][2][TX_SIZES];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above, const uint16_t *left,
int bd);
static intra_high_pred_fn pred_high[INTRA_MODES][4];
static intra_high_pred_fn dc_pred_high[2][2][4];
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static void vp10_init_intra_predictors_internal(void) {
+static void av1_init_intra_predictors_internal(void) {
#define INIT_NO_4X4(p, type) \
p[TX_8X8] = aom_##type##_predictor_8x8; \
p[TX_16X16] = aom_##type##_predictor_16x16; \
@@ -210,7 +210,7 @@
INIT_ALL_SIZES(dc_pred[1][0], dc_left);
INIT_ALL_SIZES(dc_pred[1][1], dc);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
#if CONFIG_MISC_FIXES
@@ -231,7 +231,7 @@
INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#undef intra_pred_allsizes
}
@@ -242,7 +242,7 @@
}
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void build_intra_predictors_high(const MACROBLOCKD *xd,
const uint8_t *ref8, int ref_stride,
uint8_t *dst8, int dst_stride,
@@ -465,7 +465,7 @@
xd->bd);
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
int ref_stride, uint8_t *dst, int dst_stride,
@@ -675,7 +675,7 @@
}
}
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
TX_SIZE tx_size, PREDICTION_MODE mode,
const uint8_t *ref, int ref_stride, uint8_t *dst,
int dst_stride, int aoff, int loff, int plane) {
@@ -693,10 +693,10 @@
const struct macroblockd_plane *const pd = &xd->plane[plane];
const int right_available =
mi_col + (bw >> !pd->subsampling_x) < xd->tile.mi_col_end;
- const int have_right = vp10_has_right(bsize, mi_row, mi_col, right_available,
+ const int have_right = av1_has_right(bsize, mi_row, mi_col, right_available,
tx_size, loff, aoff, pd->subsampling_x);
const int have_bottom =
- vp10_has_bottom(bsize, mi_row, mi_col, xd->mb_to_bottom_edge > 0, tx_size,
+ av1_has_bottom(bsize, mi_row, mi_col, xd->mb_to_bottom_edge > 0, tx_size,
loff, aoff, pd->subsampling_y);
const int wpx = 4 * bw;
const int hpx = 4 * bh;
@@ -711,7 +711,7 @@
#endif // CONFIG_MISC_FIXES
#if CONFIG_MISC_FIXES
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
tx_size, have_top ? VPXMIN(txpx, xr + txpx) : 0,
@@ -730,7 +730,7 @@
plane);
#else // CONFIG_MISC_FIXES
(void)bhl_in;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
tx_size, have_top, have_left, have_right, x, y,
@@ -743,6 +743,6 @@
#endif // CONFIG_MISC_FIXES
}
-void vp10_init_intra_predictors(void) {
- once(vp10_init_intra_predictors_internal);
+void av1_init_intra_predictors(void) {
+ once(av1_init_intra_predictors_internal);
}
diff --git a/av1/common/reconintra.h b/av1/common/reconintra.h
index 09d1d4b..9a00b9b 100644
--- a/av1/common/reconintra.h
+++ b/av1/common/reconintra.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_RECONINTRA_H_
-#define VP10_COMMON_RECONINTRA_H_
+#ifndef AV1_COMMON_RECONINTRA_H_
+#define AV1_COMMON_RECONINTRA_H_
#include "aom/aom_integer.h"
#include "av1/common/blockd.h"
@@ -19,9 +19,9 @@
extern "C" {
#endif
-void vp10_init_intra_predictors(void);
+void av1_init_intra_predictors(void);
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
TX_SIZE tx_size, PREDICTION_MODE mode,
const uint8_t *ref, int ref_stride, uint8_t *dst,
int dst_stride, int aoff, int loff, int plane);
@@ -29,4 +29,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_RECONINTRA_H_
+#endif // AV1_COMMON_RECONINTRA_H_
diff --git a/av1/common/scale.c b/av1/common/scale.c
index 75ae029..b91c81d 100644
--- a/av1/common/scale.c
+++ b/av1/common/scale.c
@@ -35,7 +35,7 @@
return (other_size << REF_SCALE_SHIFT) / this_size;
}
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
@@ -43,12 +43,12 @@
return res;
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
int other_h, int this_w, int this_h,
int use_highbd) {
#else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
int other_h, int this_w, int this_h) {
#endif
if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
@@ -62,7 +62,7 @@
sf->x_step_q4 = scaled_x(16, sf);
sf->y_step_q4 = scaled_y(16, sf);
- if (vp10_is_scaled(sf)) {
+ if (av1_is_scaled(sf)) {
sf->scale_value_x = scaled_x;
sf->scale_value_y = scaled_y;
} else {
@@ -116,7 +116,7 @@
// 2D subpel motion always gets filtered in both directions
sf->predict[1][1][0] = aom_convolve8;
sf->predict[1][1][1] = aom_convolve8_avg;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (use_highbd) {
if (sf->x_step_q4 == 16) {
if (sf->y_step_q4 == 16) {
diff --git a/av1/common/scale.h b/av1/common/scale.h
index e69d338..29df9b6 100644
--- a/av1/common/scale.h
+++ b/av1/common/scale.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_SCALE_H_
-#define VP10_COMMON_SCALE_H_
+#ifndef AV1_COMMON_SCALE_H_
+#define AV1_COMMON_SCALE_H_
#include "av1/common/mv.h"
#include "aom_dsp/aom_convolve.h"
@@ -33,29 +33,29 @@
int (*scale_value_y)(int val, const struct scale_factors *sf);
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_convolve_fn_t highbd_predict[2][2][2]; // horiz, vert, avg
#endif
};
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
int other_h, int this_w, int this_h,
int use_high);
#else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
int other_h, int this_w, int this_h);
#endif
-static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) {
+static INLINE int av1_is_valid_scale(const struct scale_factors *sf) {
return sf->x_scale_fp != REF_INVALID_SCALE &&
sf->y_scale_fp != REF_INVALID_SCALE;
}
-static INLINE int vp10_is_scaled(const struct scale_factors *sf) {
- return vp10_is_valid_scale(sf) &&
+static INLINE int av1_is_scaled(const struct scale_factors *sf) {
+ return av1_is_valid_scale(sf) &&
(sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
}
@@ -69,4 +69,4 @@
} // extern "C"
#endif
-#endif // VP10_COMMON_SCALE_H_
+#endif // AV1_COMMON_SCALE_H_
diff --git a/av1/common/scan.c b/av1/common/scan.c
index cddc557..aac80d9 100644
--- a/av1/common/scan.c
+++ b/av1/common/scan.c
@@ -514,40 +514,40 @@
959, 990, 991, 1022, 0, 0,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x4[16]) = {
0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_4x4[16]) = {
0, 3, 7, 11, 1, 5, 9, 12, 2, 6, 10, 14, 4, 8, 13, 15,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_4x4[16]) = {
0, 1, 3, 5, 2, 4, 6, 9, 7, 8, 11, 13, 10, 12, 14, 15,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_8x8[64]) = {
0, 3, 8, 15, 22, 32, 40, 47, 1, 5, 11, 18, 26, 34, 44, 51,
2, 7, 13, 20, 28, 38, 46, 54, 4, 10, 16, 24, 31, 41, 50, 56,
6, 12, 21, 27, 35, 43, 52, 58, 9, 17, 25, 33, 39, 48, 55, 60,
14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_8x8[64]) = {
0, 1, 2, 5, 8, 12, 19, 24, 3, 4, 7, 10, 15, 20, 30, 39,
6, 9, 13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59,
32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x8[64]) = {
0, 2, 5, 9, 14, 22, 31, 37, 1, 4, 8, 13, 19, 26, 38, 44,
3, 6, 10, 17, 24, 30, 42, 49, 7, 11, 15, 21, 29, 36, 47, 53,
12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60,
25, 32, 39, 45, 50, 55, 59, 62, 33, 40, 46, 51, 54, 58, 61, 63,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_16x16[256]) = {
0, 4, 11, 20, 31, 43, 59, 75, 85, 109, 130, 150, 165, 181, 195, 198,
1, 6, 14, 23, 34, 47, 64, 81, 95, 114, 135, 153, 171, 188, 201, 212,
2, 8, 16, 25, 38, 52, 67, 83, 101, 116, 136, 157, 172, 190, 205, 216,
@@ -566,7 +566,7 @@
65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_16x16[256]) = {
0, 1, 2, 4, 6, 9, 12, 17, 22, 29, 36, 43, 54, 64, 76,
86, 3, 5, 7, 11, 15, 19, 25, 32, 38, 48, 59, 68, 84, 99,
115, 130, 8, 10, 13, 18, 23, 27, 33, 42, 51, 60, 72, 88, 103,
@@ -587,7 +587,7 @@
255,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x16[256]) = {
0, 2, 5, 9, 17, 24, 36, 44, 55, 72, 88, 104, 128, 143, 166,
179, 1, 4, 8, 13, 20, 30, 40, 54, 66, 79, 96, 113, 141, 154,
178, 196, 3, 7, 11, 18, 25, 33, 46, 57, 71, 86, 101, 119, 148,
@@ -608,7 +608,7 @@
255,
};
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x32[1024]) = {
0, 2, 5, 10, 17, 25, 38, 47, 62, 83, 101, 121, 145,
170, 193, 204, 210, 219, 229, 233, 245, 257, 275, 299, 342, 356,
377, 405, 455, 471, 495, 527, 1, 4, 8, 15, 22, 30, 45,
@@ -690,40 +690,40 @@
967, 973, 988, 996, 1002, 1006, 1014, 1018, 1021, 1023,
};
-const scan_order vp10_default_scan_orders[TX_SIZES] = {
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+const scan_order av1_default_scan_orders[TX_SIZES] = {
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
};
-const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_scan_orders[TX_SIZES][TX_TYPES] = {
{ // TX_4X4
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
- { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
- { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
- { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors } },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+ { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+ { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+ { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors } },
{ // TX_8X8
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
- { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
- { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
- { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors } },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+ { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+ { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+ { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors } },
{ // TX_16X16
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors },
- { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
- { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
- { default_scan_16x16, vp10_default_iscan_16x16,
+ { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+ { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+ { default_scan_16x16, av1_default_iscan_16x16,
default_scan_16x16_neighbors } },
{ // TX_32X32
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors },
- { default_scan_32x32, vp10_default_iscan_32x32,
+ { default_scan_32x32, av1_default_iscan_32x32,
default_scan_32x32_neighbors } },
};
diff --git a/av1/common/scan.h b/av1/common/scan.h
index 22d21b1..27cb99d 100644
--- a/av1/common/scan.h
+++ b/av1/common/scan.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_SCAN_H_
-#define VP10_COMMON_SCAN_H_
+#ifndef AV1_COMMON_SCAN_H_
+#define AV1_COMMON_SCAN_H_
#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
@@ -30,8 +30,8 @@
const int16_t *neighbors;
} scan_order;
-extern const scan_order vp10_default_scan_orders[TX_SIZES];
-extern const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES];
+extern const scan_order av1_default_scan_orders[TX_SIZES];
+extern const scan_order av1_scan_orders[TX_SIZES][TX_TYPES];
static INLINE int get_coef_context(const int16_t *neighbors,
const uint8_t *token_cache, int c) {
@@ -41,11 +41,11 @@
}
static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type) {
- return &vp10_scan_orders[tx_size][tx_type];
+ return &av1_scan_orders[tx_size][tx_type];
}
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_SCAN_H_
+#endif // AV1_COMMON_SCAN_H_
diff --git a/av1/common/seg_common.c b/av1/common/seg_common.c
index c7f428b..369a3e1 100644
--- a/av1/common/seg_common.c
+++ b/av1/common/seg_common.c
@@ -26,25 +26,25 @@
// the coding mechanism is still subject to change so these provide a
// convenient single point of change.
-void vp10_clearall_segfeatures(struct segmentation *seg) {
- vp10_zero(seg->feature_data);
- vp10_zero(seg->feature_mask);
+void av1_clearall_segfeatures(struct segmentation *seg) {
+ av1_zero(seg->feature_data);
+ av1_zero(seg->feature_mask);
}
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
seg->feature_mask[segment_id] |= 1 << feature_id;
}
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
return seg_feature_data_max[feature_id];
}
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
return seg_feature_data_signed[feature_id];
}
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
+void av1_set_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id, int seg_data) {
assert(seg_data <= seg_feature_data_max[feature_id]);
if (seg_data < 0) {
@@ -55,7 +55,7 @@
seg->feature_data[segment_id][feature_id] = seg_data;
}
-const aom_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
+const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
};
diff --git a/av1/common/seg_common.h b/av1/common/seg_common.h
index 9a0f2c2..eda022f 100644
--- a/av1/common/seg_common.h
+++ b/av1/common/seg_common.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_SEG_COMMON_H_
-#define VP10_COMMON_SEG_COMMON_H_
+#ifndef AV1_COMMON_SEG_COMMON_H_
+#define AV1_COMMON_SEG_COMMON_H_
#include "aom_dsp/prob.h"
@@ -57,16 +57,16 @@
return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id));
}
-void vp10_clearall_segfeatures(struct segmentation *seg);
+void av1_clearall_segfeatures(struct segmentation *seg);
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id);
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
+void av1_set_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id, int seg_data);
static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
@@ -74,10 +74,10 @@
return seg->feature_data[segment_id][feature_id];
}
-extern const aom_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
+extern const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_SEG_COMMON_H_
+#endif // AV1_COMMON_SEG_COMMON_H_
diff --git a/av1/common/thread_common.c b/av1/common/thread_common.c
index 502ba50..f068cf4 100644
--- a/av1/common/thread_common.c
+++ b/av1/common/thread_common.c
@@ -34,7 +34,7 @@
}
#endif // CONFIG_MULTITHREAD
-static INLINE void sync_read(VP10LfSync *const lf_sync, int r, int c) {
+static INLINE void sync_read(AV1LfSync *const lf_sync, int r, int c) {
#if CONFIG_MULTITHREAD
const int nsync = lf_sync->sync_range;
@@ -54,7 +54,7 @@
#endif // CONFIG_MULTITHREAD
}
-static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
+static INLINE void sync_write(AV1LfSync *const lf_sync, int r, int c,
const int sb_cols) {
#if CONFIG_MULTITHREAD
const int nsync = lf_sync->sync_range;
@@ -87,9 +87,9 @@
// Implement row loopfiltering for each thread.
static INLINE void thread_loop_filter_rows(
- const YV12_BUFFER_CONFIG *const frame_buffer, VP10_COMMON *const cm,
+ const YV12_BUFFER_CONFIG *const frame_buffer, AV1_COMMON *const cm,
struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
- int y_only, VP10LfSync *const lf_sync) {
+ int y_only, AV1LfSync *const lf_sync) {
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
int mi_row, mi_col;
@@ -115,22 +115,22 @@
sync_read(lf_sync, r, c);
- vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+ av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
// TODO(JBB): Make setup_mask work for non 420.
- vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+ av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
- vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
for (plane = 1; plane < num_planes; ++plane) {
switch (path) {
case LF_PATH_420:
- vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_444:
- vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+ av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_SLOW:
- vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+ av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
mi_row, mi_col);
break;
}
@@ -142,7 +142,7 @@
}
// Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(VP10LfSync *const lf_sync,
+static int loop_filter_row_worker(AV1LfSync *const lf_sync,
LFWorkerData *const lf_data) {
thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
lf_data->start, lf_data->stop, lf_data->y_only,
@@ -150,11 +150,11 @@
return 1;
}
-static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only,
VPxWorker *workers, int nworkers,
- VP10LfSync *lf_sync) {
+ AV1LfSync *lf_sync) {
const VPxWorkerInterface *const winterface = aom_get_worker_interface();
// Number of superblock rows and cols
const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
@@ -166,8 +166,8 @@
if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
num_workers > lf_sync->num_workers) {
- vp10_loop_filter_dealloc(lf_sync);
- vp10_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+ av1_loop_filter_dealloc(lf_sync);
+ av1_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
}
// Initialize cur_sb_col to -1 for all SB rows.
@@ -190,7 +190,7 @@
worker->data2 = lf_data;
// Loopfilter data
- vp10_loop_filter_data_reset(lf_data, frame, cm, planes);
+ av1_loop_filter_data_reset(lf_data, frame, cm, planes);
lf_data->start = start + i * MI_BLOCK_SIZE;
lf_data->stop = stop;
lf_data->y_only = y_only;
@@ -209,11 +209,11 @@
}
}
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int frame_filter_level, int y_only,
int partial_frame, VPxWorker *workers,
- int num_workers, VP10LfSync *lf_sync) {
+ int num_workers, AV1LfSync *lf_sync) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
if (!frame_filter_level) return;
@@ -226,7 +226,7 @@
mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
}
end_mi_row = start_mi_row + mi_rows_to_filter;
- vp10_loop_filter_frame_init(cm, frame_filter_level);
+ av1_loop_filter_frame_init(cm, frame_filter_level);
loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
workers, num_workers, lf_sync);
@@ -247,7 +247,7 @@
}
// Allocate memory for lf row synchronization
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, AV1_COMMON *cm, int rows,
int width, int num_workers) {
lf_sync->rows = rows;
#if CONFIG_MULTITHREAD
@@ -284,7 +284,7 @@
}
// Deallocate lf synchronization related mutex and data
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync) {
if (lf_sync != NULL) {
#if CONFIG_MULTITHREAD
int i;
@@ -306,12 +306,12 @@
aom_free(lf_sync->cur_sb_col);
// clear the structure as the source of this call may be a resize in which
// case this call will be followed by an _alloc() which may fail.
- vp10_zero(*lf_sync);
+ av1_zero(*lf_sync);
}
}
// Accumulate frame counts.
-void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
+void av1_accumulate_frame_counts(AV1_COMMON *cm, FRAME_COUNTS *counts,
int is_dec) {
int i, j, k, l, m;
diff --git a/av1/common/thread_common.h b/av1/common/thread_common.h
index c514865..5b0dfc0 100644
--- a/av1/common/thread_common.h
+++ b/av1/common/thread_common.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_LOOPFILTER_THREAD_H_
-#define VP10_COMMON_LOOPFILTER_THREAD_H_
+#ifndef AV1_COMMON_LOOPFILTER_THREAD_H_
+#define AV1_COMMON_LOOPFILTER_THREAD_H_
#include "./aom_config.h"
#include "av1/common/loopfilter.h"
#include "aom_util/aom_thread.h"
@@ -19,11 +19,11 @@
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
struct FRAME_COUNTS;
// Loopfilter row synchronization
-typedef struct VP10LfSyncData {
+typedef struct AV1LfSyncData {
#if CONFIG_MULTITHREAD
pthread_mutex_t *mutex_;
pthread_cond_t *cond_;
@@ -38,27 +38,27 @@
// Row-based parallel loopfilter data
LFWorkerData *lfdata;
int num_workers;
-} VP10LfSync;
+} AV1LfSync;
// Allocate memory for loopfilter row synchronization.
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, struct VP10Common *cm,
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, struct AV1Common *cm,
int rows, int width, int num_workers);
// Deallocate loopfilter synchronization related mutex and data.
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync);
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync);
// Multi-threaded loopfilter that uses the tile threads.
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int frame_filter_level, int y_only,
int partial_frame, VPxWorker *workers,
- int num_workers, VP10LfSync *lf_sync);
+ int num_workers, AV1LfSync *lf_sync);
-void vp10_accumulate_frame_counts(struct VP10Common *cm,
+void av1_accumulate_frame_counts(struct AV1Common *cm,
struct FRAME_COUNTS *counts, int is_dec);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_LOOPFILTER_THREAD_H_
+#endif // AV1_COMMON_LOOPFILTER_THREAD_H_
diff --git a/av1/common/tile_common.c b/av1/common/tile_common.c
index e0a0572..ebe6d67 100644
--- a/av1/common/tile_common.c
+++ b/av1/common/tile_common.c
@@ -22,19 +22,19 @@
return VPXMIN(offset, mis);
}
-void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) {
+void av1_tile_set_row(TileInfo *tile, const AV1_COMMON *cm, int row) {
tile->mi_row_start = get_tile_offset(row, cm->mi_rows, cm->log2_tile_rows);
tile->mi_row_end = get_tile_offset(row + 1, cm->mi_rows, cm->log2_tile_rows);
}
-void vp10_tile_set_col(TileInfo *tile, const VP10_COMMON *cm, int col) {
+void av1_tile_set_col(TileInfo *tile, const AV1_COMMON *cm, int col) {
tile->mi_col_start = get_tile_offset(col, cm->mi_cols, cm->log2_tile_cols);
tile->mi_col_end = get_tile_offset(col + 1, cm->mi_cols, cm->log2_tile_cols);
}
-void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) {
- vp10_tile_set_row(tile, cm, row);
- vp10_tile_set_col(tile, cm, col);
+void av1_tile_init(TileInfo *tile, const AV1_COMMON *cm, int row, int col) {
+ av1_tile_set_row(tile, cm, row);
+ av1_tile_set_col(tile, cm, col);
}
static int get_min_log2_tile_cols(const int sb64_cols) {
@@ -49,7 +49,7 @@
return max_log2 - 1;
}
-void vp10_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
+void av1_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
int *max_log2_tile_cols) {
const int sb64_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2;
*min_log2_tile_cols = get_min_log2_tile_cols(sb64_cols);
diff --git a/av1/common/tile_common.h b/av1/common/tile_common.h
index ca77ecc..23a7ce0 100644
--- a/av1/common/tile_common.h
+++ b/av1/common/tile_common.h
@@ -9,14 +9,14 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_COMMON_TILE_COMMON_H_
-#define VP10_COMMON_TILE_COMMON_H_
+#ifndef AV1_COMMON_TILE_COMMON_H_
+#define AV1_COMMON_TILE_COMMON_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Common;
+struct AV1Common;
typedef struct TileInfo {
int mi_row_start, mi_row_end;
@@ -25,17 +25,17 @@
// initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on
// 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)'
-void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, int row,
+void av1_tile_init(TileInfo *tile, const struct AV1Common *cm, int row,
int col);
-void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row);
-void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col);
+void av1_tile_set_row(TileInfo *tile, const struct AV1Common *cm, int row);
+void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
-void vp10_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
+void av1_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
int *max_log2_tile_cols);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_COMMON_TILE_COMMON_H_
+#endif // AV1_COMMON_TILE_COMMON_H_
diff --git a/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
index b6c38aa..876e579 100644
--- a/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
@@ -23,31 +23,31 @@
#define ADD_EPI16 _mm_adds_epi16
#define SUB_EPI16 _mm_subs_epi16
#if FDCT32x32_HIGH_PRECISION
-void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
int i, j;
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
- vp10_fdct32(temp_in, temp_out, 0);
+ av1_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
out[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
}
}
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rows_c
#else
-void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
int i, j;
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
- vp10_fdct32(temp_in, temp_out, 1);
+ av1_fdct32(temp_in, temp_out, 1);
for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
}
}
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rd_rows_c
#endif // FDCT32x32_HIGH_PRECISION
#else
#define ADD_EPI16 _mm_add_epi16
diff --git a/av1/common/x86/av1_fwd_txfm_sse2.c b/av1/common/x86/av1_fwd_txfm_sse2.c
index ee78f89..c4d0b0c 100644
--- a/av1/common/x86/av1_fwd_txfm_sse2.c
+++ b/av1/common/x86/av1_fwd_txfm_sse2.c
@@ -15,7 +15,7 @@
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"
-void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
__m128i in0, in1;
__m128i tmp;
const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@
store_output(&in0, output);
}
-void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
__m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
__m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
__m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,7 +84,7 @@
store_output(&in1, output);
}
-void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+void av1_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
int stride) {
__m128i in0, in1, in2, in3;
__m128i u0, u1;
@@ -153,7 +153,7 @@
store_output(&in1, output);
}
-void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+void av1_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
int stride) {
__m128i in0, in1, in2, in3;
__m128i u0, u1;
@@ -226,47 +226,47 @@
}
#define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vp10_fdct4x4_sse2
-#define FDCT8x8_2D vp10_fdct8x8_sse2
-#define FDCT16x16_2D vp10_fdct16x16_sse2
+#define FDCT4x4_2D av1_fdct4x4_sse2
+#define FDCT8x8_2D av1_fdct8x8_sse2
+#define FDCT16x16_2D av1_fdct16x16_sse2
#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D
-#define FDCT32x32_2D vp10_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D vp10_fdct32x32_sse2
+#define FDCT32x32_2D av1_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vp10_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vp10_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vp10_highbd_fdct16x16_sse2
+#define FDCT4x4_2D av1_highbd_fdct4x4_sse2
+#define FDCT8x8_2D av1_highbd_fdct8x8_sse2
+#define FDCT16x16_2D av1_highbd_fdct16x16_sse2
#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h" // NOLINT
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D
-#define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D vp10_highbd_fdct32x32_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/x86/av1_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
index ede75f9..e8c5072 100644
--- a/av1/common/x86/av1_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -22,7 +22,7 @@
*(int *)(dest) = _mm_cvtsi128_si32(d0); \
}
-void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
const __m128i cst = _mm_setr_epi16(
@@ -152,7 +152,7 @@
}
}
-void vp10_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -177,7 +177,7 @@
res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
}
-void vp10_idct4_sse2(__m128i *in) {
+void av1_idct4_sse2(__m128i *in) {
const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -213,7 +213,7 @@
in[1] = _mm_shuffle_epi32(in[1], 0x4E);
}
-void vp10_iadst4_sse2(__m128i *in) {
+void av1_iadst4_sse2(__m128i *in) {
const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@@ -447,7 +447,7 @@
out7 = _mm_subs_epi16(stp1_0, stp2_7); \
}
-void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -478,11 +478,11 @@
// 2-D
for (i = 0; i < 2; i++) {
- // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+ // 8x8 Transpose is copied from av1_fdct8x8_sse2()
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
- // 4-stage 1D vp10_idct8x8
+ // 4-stage 1D av1_idct8x8
IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
in6, in7);
}
@@ -516,7 +516,7 @@
RECON_AND_STORE(dest + 7 * stride, in7);
}
-void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -537,7 +537,7 @@
RECON_AND_STORE(dest + 7 * stride, dc_value);
}
-void vp10_idct8_sse2(__m128i *in) {
+void av1_idct8_sse2(__m128i *in) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
@@ -553,16 +553,16 @@
__m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+ // 8x8 Transpose is copied from av1_fdct8x8_sse2()
TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
in1, in2, in3, in4, in5, in6, in7);
- // 4-stage 1D vp10_idct8x8
+ // 4-stage 1D av1_idct8x8
IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
in[4], in[5], in[6], in[7]);
}
-void vp10_iadst8_sse2(__m128i *in) {
+void av1_iadst8_sse2(__m128i *in) {
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@@ -790,7 +790,7 @@
in[7] = _mm_sub_epi16(k__const_0, s1);
}
-void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1159,7 +1159,7 @@
stp2_12) \
}
-void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -1201,7 +1201,7 @@
curr1 = l;
for (i = 0; i < 2; i++) {
- // 1-D vp10_idct
+ // 1-D av1_idct
// Load input data.
in[0] = _mm_load_si128((const __m128i *)input);
@@ -1249,7 +1249,7 @@
}
for (i = 0; i < 2; i++) {
int j;
- // 1-D vp10_idct
+ // 1-D av1_idct
array_transpose_8x8(l + i * 8, in);
array_transpose_8x8(r + i * 8, in + 8);
@@ -1284,7 +1284,7 @@
}
}
-void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
@@ -1317,7 +1317,7 @@
}
}
-static void vp10_iadst16_8col(__m128i *in) {
+static void av1_iadst16_8col(__m128i *in) {
// perform 16x16 1-D ADST for 8 columns
__m128i s[16], x[16], u[32], v[32];
const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
@@ -1787,7 +1787,7 @@
in[15] = _mm_sub_epi16(kZero, s[1]);
}
-static void vp10_idct16_8col(__m128i *in) {
+static void av1_idct16_8col(__m128i *in) {
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
@@ -2131,19 +2131,19 @@
in[15] = _mm_sub_epi16(s[0], s[15]);
}
-void vp10_idct16_sse2(__m128i *in0, __m128i *in1) {
+void av1_idct16_sse2(__m128i *in0, __m128i *in1) {
array_transpose_16x16(in0, in1);
- vp10_idct16_8col(in0);
- vp10_idct16_8col(in1);
+ av1_idct16_8col(in0);
+ av1_idct16_8col(in1);
}
-void vp10_iadst16_sse2(__m128i *in0, __m128i *in1) {
+void av1_iadst16_sse2(__m128i *in0, __m128i *in1) {
array_transpose_16x16(in0, in1);
- vp10_iadst16_8col(in0);
- vp10_iadst16_8col(in1);
+ av1_iadst16_8col(in0);
+ av1_iadst16_8col(in1);
}
-void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3017,12 +3017,12 @@
}
// Only upper-left 8x8 has non-zero coeff
-void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
- // vp10_idct constants for each stage
+ // av1_idct constants for each stage
const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
@@ -3174,13 +3174,13 @@
}
}
-void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
const __m128i zero = _mm_setzero_si128();
- // vp10_idct constants for each stage
+ // av1_idct constants for each stage
const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
@@ -3242,7 +3242,7 @@
for (i = 0; i < 4; i++) {
i32 = (i << 5);
- // First 1-D vp10_idct
+ // First 1-D av1_idct
// Load input data.
LOAD_DQCOEFF(in[0], input);
LOAD_DQCOEFF(in[8], input);
@@ -3392,7 +3392,7 @@
col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
}
for (i = 0; i < 4; i++) {
- // Second 1-D vp10_idct
+ // Second 1-D av1_idct
j = i << 3;
// Transpose 32x8 block to 8x32 block
@@ -3448,7 +3448,7 @@
}
}
-void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
@@ -3469,7 +3469,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
__m128i ubounded, retval;
const __m128i zero = _mm_set1_epi16(0);
@@ -3483,7 +3483,7 @@
return retval;
}
-void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -3517,7 +3517,7 @@
if (!test) {
// Do the row transform
- vp10_idct4_sse2(inptr);
+ av1_idct4_sse2(inptr);
// Check the min & max values
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3546,14 +3546,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct4_c(input, outptr, bd);
+ av1_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
}
if (optimised_cols) {
- vp10_idct4_sse2(inptr);
+ av1_idct4_sse2(inptr);
// Final round and shift
inptr[0] = _mm_add_epi16(inptr[0], eight);
@@ -3589,7 +3589,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vp10_highbd_idct4_c(temp_in, temp_out, bd);
+ av1_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3598,7 +3598,7 @@
}
}
-void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
@@ -3633,7 +3633,7 @@
if (!test) {
// Do the row transform
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Find the min & max for the column transform
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3663,14 +3663,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 8; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
}
if (optimised_cols) {
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Final round & shift and Reconstruction and Store
{
@@ -3689,7 +3689,7 @@
tran_low_t temp_in[8], temp_out[8];
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3698,7 +3698,7 @@
}
}
-void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -3734,7 +3734,7 @@
if (!test) {
// Do the row transform
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Find the min & max for the column transform
// N.B. Only first 4 cols contain non-zero coeffs
@@ -3766,14 +3766,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct8_c(input, outptr, bd);
+ av1_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
}
if (optimised_cols) {
- vp10_idct8_sse2(inptr);
+ av1_idct8_sse2(inptr);
// Final round & shift and Reconstruction and Store
{
@@ -3792,7 +3792,7 @@
tran_low_t temp_in[8], temp_out[8];
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vp10_highbd_idct8_c(temp_in, temp_out, bd);
+ av1_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3801,7 +3801,7 @@
}
}
-void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -3839,7 +3839,7 @@
if (!test) {
// Do the row transform
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Find the min & max for the column transform
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3874,14 +3874,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 16; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
}
if (optimised_cols) {
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Final round & shift and Reconstruction and Store
{
@@ -3905,7 +3905,7 @@
tran_low_t temp_in[16], temp_out[16];
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3914,7 +3914,7 @@
}
}
-void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -3954,7 +3954,7 @@
if (!test) {
// Do the row transform (N.B. This transposes inptr)
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Find the min & max for the column transform
// N.B. Only first 4 cols contain non-zero coeffs
@@ -3992,14 +3992,14 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp10_highbd_idct16_c(input, outptr, bd);
+ av1_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
}
if (optimised_cols) {
- vp10_idct16_sse2(inptr, inptr + 16);
+ av1_idct16_sse2(inptr, inptr + 16);
// Final round & shift and Reconstruction and Store
{
@@ -4023,7 +4023,7 @@
tran_low_t temp_in[16], temp_out[16];
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vp10_highbd_idct16_c(temp_in, temp_out, bd);
+ av1_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4031,4 +4031,4 @@
}
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 4b948f9..2247b67 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -13,7 +13,7 @@
#include "aom_dsp/x86/txfm_common_sse2.h"
#include "aom_ports/mem.h"
-void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
__m128i in[2];
const __m128i zero = _mm_setzero_si128();
@@ -76,7 +76,7 @@
}
}
-void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
__m128i in[8];
const __m128i zero = _mm_setzero_si128();
@@ -141,7 +141,7 @@
RECON_AND_STORE(dest + 7 * stride, in[7]);
}
-void vp10_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+void av1_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride, int tx_type) {
__m128i in0[16], in1[16];
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 555032a..ec80ec6 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -51,9 +51,9 @@
#include "av1/decoder/decoder.h"
#include "av1/decoder/dsubexp.h"
-#define MAX_VP10_HEADER_SIZE 80
+#define MAX_AV1_HEADER_SIZE 80
-static int is_compound_reference_allowed(const VP10_COMMON *cm) {
+static int is_compound_reference_allowed(const AV1_COMMON *cm) {
int i;
if (frame_is_intra_only(cm)) return 0;
for (i = 1; i < REFS_PER_FRAME; ++i)
@@ -62,7 +62,7 @@
return 0;
}
-static void setup_compound_reference_mode(VP10_COMMON *cm) {
+static void setup_compound_reference_mode(AV1_COMMON *cm) {
if (cm->ref_frame_sign_bias[LAST_FRAME] ==
cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
cm->comp_fixed_ref = ALTREF_FRAME;
@@ -106,34 +106,34 @@
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZES - 3; ++j)
- vp10_diff_update_prob(r, &tx_probs->p8x8[i][j]);
+ av1_diff_update_prob(r, &tx_probs->p8x8[i][j]);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZES - 2; ++j)
- vp10_diff_update_prob(r, &tx_probs->p16x16[i][j]);
+ av1_diff_update_prob(r, &tx_probs->p16x16[i][j]);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZES - 1; ++j)
- vp10_diff_update_prob(r, &tx_probs->p32x32[i][j]);
+ av1_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
- vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+ av1_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}
static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
for (j = 0; j < INTER_MODES - 1; ++j)
- vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+ av1_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}
#if CONFIG_MISC_FIXES
static REFERENCE_MODE read_frame_reference_mode(
- const VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
if (is_compound_reference_allowed(cm)) {
return aom_rb_read_bit(rb)
? REFERENCE_MODE_SELECT
@@ -143,7 +143,7 @@
}
}
#else
-static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
+static REFERENCE_MODE read_frame_reference_mode(const AV1_COMMON *cm,
aom_reader *r) {
if (is_compound_reference_allowed(cm)) {
return aom_read_bit(r)
@@ -155,30 +155,30 @@
}
#endif
-static void read_frame_reference_mode_probs(VP10_COMMON *cm, aom_reader *r) {
+static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
FRAME_CONTEXT *const fc = cm->fc;
int i;
if (cm->reference_mode == REFERENCE_MODE_SELECT)
for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->comp_inter_prob[i]);
+ av1_diff_update_prob(r, &fc->comp_inter_prob[i]);
if (cm->reference_mode != COMPOUND_REFERENCE)
for (i = 0; i < REF_CONTEXTS; ++i) {
- vp10_diff_update_prob(r, &fc->single_ref_prob[i][0]);
- vp10_diff_update_prob(r, &fc->single_ref_prob[i][1]);
+ av1_diff_update_prob(r, &fc->single_ref_prob[i][0]);
+ av1_diff_update_prob(r, &fc->single_ref_prob[i][1]);
}
if (cm->reference_mode != SINGLE_REFERENCE)
for (i = 0; i < REF_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->comp_ref_prob[i]);
+ av1_diff_update_prob(r, &fc->comp_ref_prob[i]);
}
static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
int i;
for (i = 0; i < n; ++i)
#if CONFIG_MISC_FIXES
- vp10_diff_update_prob(r, &p[i]);
+ av1_diff_update_prob(r, &p[i]);
#else
if (aom_read(r, MV_UPDATE_PROB)) p[i] = (aom_read_literal(r, 7) << 1) | 1;
#endif
@@ -221,48 +221,48 @@
const int seg_id = xd->mi[0]->mbmi.segment_id;
if (eob > 0) {
tran_low_t *const dqcoeff = pd->dqcoeff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_4X4:
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
tx_type, xd->lossless[seg_id]);
break;
case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
tx_type);
break;
case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
tx_type);
break;
case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
tx_type);
break;
default: assert(0 && "Invalid transform size"); return;
}
} else {
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_4X4:
- vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
+ av1_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
xd->lossless[seg_id]);
break;
case TX_8X8:
- vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
+ av1_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
break;
case TX_16X16:
- vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
+ av1_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
break;
case TX_32X32:
- vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
+ av1_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
break;
default: assert(0 && "Invalid transform size"); return;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (eob == 1) {
dqcoeff[0] = 0;
@@ -285,48 +285,48 @@
const int seg_id = xd->mi[0]->mbmi.segment_id;
if (eob > 0) {
tran_low_t *const dqcoeff = pd->dqcoeff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_4X4:
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
tx_type, xd->lossless[seg_id]);
break;
case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
tx_type);
break;
case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
tx_type);
break;
case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
+ av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
tx_type);
break;
default: assert(0 && "Invalid transform size"); return;
}
} else {
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_4X4:
- vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
+ av1_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
xd->lossless[seg_id]);
break;
case TX_8X8:
- vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
+ av1_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
break;
case TX_16X16:
- vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
+ av1_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
break;
case TX_32X32:
- vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
+ av1_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
break;
default: assert(0 && "Invalid transform size"); return;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (eob == 1) {
dqcoeff[0] = 0;
@@ -356,14 +356,14 @@
if (mbmi->sb_type < BLOCK_8X8)
if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
- vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+ av1_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
pd->dst.stride, dst, pd->dst.stride, col, row,
plane);
if (!mbmi->skip) {
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
const scan_order *sc = get_scan(tx_size, tx_type);
- const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+ const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
r, mbmi->segment_id);
inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
pd->dst.stride, eob);
@@ -378,7 +378,7 @@
int block_idx = (row << 1) + col;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
const scan_order *sc = get_scan(tx_size, tx_type);
- const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
+ const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
mbmi->segment_id);
inverse_transform_block_inter(
@@ -423,7 +423,7 @@
} while (--b_h);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void high_build_mc_border(const uint8_t *src8, int src_stride,
uint16_t *dst, int dst_stride, int x, int y,
int b_w, int b_h, int w, int h) {
@@ -460,9 +460,9 @@
if (y > 0 && y < h) ref_row += src_stride;
} while (--b_h);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
int x0, int y0, int b_w, int b_h,
int frame_width, int frame_height,
@@ -511,15 +511,15 @@
inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w,
h, ref, kernel, xs, ys);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static void dec_build_inter_predictors(
- VP10Decoder *const pbi, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
+ AV1Decoder *const pbi, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
int y, int w, int h, int mi_x, int mi_y, const InterpKernel *kernel,
const struct scale_factors *sf, struct buf_2d *pre_buf,
struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf,
int is_scaled, int ref) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
struct macroblockd_plane *const pd = &xd->plane[plane];
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
MV32 scaled_mv;
@@ -561,7 +561,7 @@
// Scale the MV and incorporate the sub-pixel offset of the block
// in the reference frame.
- scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
xs = sf->x_step_q4;
ys = sf->y_step_q4;
} else {
@@ -616,7 +616,7 @@
// Wait until reference block is ready. Pad 7 more pixels as last 7
// pixels of each superblock row can be changed by next superblock row.
if (cm->frame_parallel_decode)
- vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+ av1_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
// Skip border extension if block is inside the frame.
@@ -631,7 +631,7 @@
extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, frame_width,
frame_height, border_offset, dst, dst_buf->stride,
subpel_x, subpel_y, kernel, sf,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd,
#endif
w, h, ref, xs, ys);
@@ -642,11 +642,11 @@
// pixels of each superblock row can be changed by next superblock row.
if (cm->frame_parallel_decode) {
const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
- vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+ av1_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
@@ -657,17 +657,17 @@
#else
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y,
sf, w, h, ref, kernel, xs, ys);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
-static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
+static void dec_build_inter_predictors_sb(AV1Decoder *const pbi,
MACROBLOCKD *xd, int mi_row,
int mi_col) {
int plane;
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
const MODE_INFO *mi = xd->mi[0];
- const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
+ const InterpKernel *kernel = av1_filter_kernels[mi->mbmi.interp_filter];
const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
const int is_compound = has_second_ref(&mi->mbmi);
@@ -687,7 +687,7 @@
const int idx = xd->block_refs[ref]->idx;
BufferPool *const pool = pbi->common.buffer_pool;
RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
- const int is_scaled = vp10_is_scaled(sf);
+ const int is_scaled = av1_is_scaled(sf);
if (sb_type < BLOCK_8X8) {
const PARTITION_TYPE bp = BLOCK_8X8 - sb_type;
@@ -744,7 +744,7 @@
}
}
-static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static MB_MODE_INFO *set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int bw, int bh, int x_mis, int y_mis, int bwl,
int bhl) {
@@ -770,14 +770,14 @@
// as they are always compared to values that are in 1/8th pel units
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
return &xd->mi[0]->mbmi;
}
-static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
int mi_row, int mi_col, aom_reader *r,
BLOCK_SIZE bsize, int bwl, int bhl) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int less8x8 = bsize < BLOCK_8X8;
const int bw = 1 << (bwl - 1);
const int bh = 1 << (bhl - 1);
@@ -795,7 +795,7 @@
"Invalid block size.");
}
- vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
+ av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
if (mbmi->skip) {
dec_reset_skip_context(xd);
@@ -895,7 +895,7 @@
memset(left_ctx, partition_context_lookup[subsize].left, bw);
}
-static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col, aom_reader *r,
int has_rows, int has_cols, int bsl) {
const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
@@ -904,7 +904,7 @@
PARTITION_TYPE p;
if (has_rows && has_cols)
- p = (PARTITION_TYPE)aom_read_tree(r, vp10_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
else if (!has_rows && has_cols)
p = aom_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
else if (has_rows && !has_cols)
@@ -918,10 +918,10 @@
}
// TODO(slavarnway): eliminate bsize and subsize in future commits
-static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
int mi_row, int mi_col, aom_reader *r,
BLOCK_SIZE bsize, int n4x4_l2) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int n8x8_l2 = n4x4_l2 - 1;
const int num_8x8_wh = 1 << n8x8_l2;
const int hbs = num_8x8_wh >> 1;
@@ -1002,7 +1002,7 @@
"Failed to allocate bool decoder %d", 1);
}
-static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
+static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
aom_reader *r) {
int i, j, k, l, m;
@@ -1012,7 +1012,7 @@
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
- vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+ av1_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}
static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
@@ -1022,7 +1022,7 @@
read_coef_probs_common(fc->coef_probs[tx_size], r);
}
-static void setup_segmentation(VP10_COMMON *const cm,
+static void setup_segmentation(AV1_COMMON *const cm,
struct aom_read_bit_buffer *rb) {
struct segmentation *const seg = &cm->seg;
#if !CONFIG_MISC_FIXES
@@ -1069,19 +1069,19 @@
if (seg->update_data) {
seg->abs_delta = aom_rb_read_bit(rb);
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
for (i = 0; i < MAX_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
int data = 0;
const int feature_enabled = aom_rb_read_bit(rb);
if (feature_enabled) {
- vp10_enable_segfeature(seg, i, j);
- data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j));
- if (vp10_is_segfeature_signed(j))
+ av1_enable_segfeature(seg, i, j);
+ data = decode_unsigned_max(rb, av1_seg_feature_data_max(j));
+ if (av1_is_segfeature_signed(j))
data = aom_rb_read_bit(rb) ? -data : data;
}
- vp10_set_segdata(seg, i, j, data);
+ av1_set_segdata(seg, i, j, data);
}
}
}
@@ -1114,13 +1114,13 @@
}
#if CONFIG_CLPF
-static void setup_clpf(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_clpf(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
cm->clpf = aom_rb_read_literal(rb, 1);
}
#endif
#if CONFIG_DERING
-static void setup_dering(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_dering(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
cm->dering_level = aom_rb_read_literal(rb, DERING_LEVEL_BITS);
}
#endif // CONFIG_DERING
@@ -1131,7 +1131,7 @@
: 0;
}
-static void setup_quantization(VP10_COMMON *const cm,
+static void setup_quantization(AV1_COMMON *const cm,
struct aom_read_bit_buffer *rb) {
cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
cm->y_dc_delta_q = read_delta_q(rb);
@@ -1150,7 +1150,7 @@
#endif
}
-static void setup_segmentation_dequant(VP10_COMMON *const cm) {
+static void setup_segmentation_dequant(AV1_COMMON *const cm) {
// Build y/uv dequant values based on segmentation.
int i = 0;
#if CONFIG_AOM_QM
@@ -1163,14 +1163,14 @@
#endif
if (cm->seg.enabled) {
for (i = 0; i < MAX_SEGMENTS; ++i) {
- const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
+ const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex);
cm->y_dequant[i][0] =
- vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
- cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+ av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
cm->uv_dequant[i][0] =
- vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
cm->uv_dequant[i][1] =
- vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_AOM_QM
lossless = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -1192,12 +1192,12 @@
// When segmentation is disabled, only the first value is used. The
// remaining are don't cares.
cm->y_dequant[0][0] =
- vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
- cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+ av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[0][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
cm->uv_dequant[0][0] =
- vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
cm->uv_dequant[0][1] =
- vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_AOM_QM
lossless = qindex == 0 && cm->y_dc_delta_q == 0 && cm->uv_dc_delta_q == 0 &&
cm->uv_ac_delta_q == 0;
@@ -1219,14 +1219,14 @@
return aom_rb_read_bit(rb) ? SWITCHABLE : aom_rb_read_literal(rb, 2);
}
-static void setup_render_size(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
cm->render_width = cm->width;
cm->render_height = cm->height;
if (aom_rb_read_bit(rb))
- vp10_read_frame_size(rb, &cm->render_width, &cm->render_height);
+ av1_read_frame_size(rb, &cm->render_width, &cm->render_height);
}
-static void resize_mv_buffer(VP10_COMMON *cm) {
+static void resize_mv_buffer(AV1_COMMON *cm) {
aom_free(cm->cur_frame->mvs);
cm->cur_frame->mi_rows = cm->mi_rows;
cm->cur_frame->mi_cols = cm->mi_cols;
@@ -1234,7 +1234,7 @@
sizeof(*cm->cur_frame->mvs));
}
-static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
+static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
aom_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1247,16 +1247,16 @@
const int new_mi_cols =
ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
- // Allocations in vp10_alloc_context_buffers() depend on individual
+ // Allocations in av1_alloc_context_buffers() depend on individual
// dimensions as well as the overall size.
if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
- if (vp10_alloc_context_buffers(cm, width, height))
+ if (av1_alloc_context_buffers(cm, width, height))
aom_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate context buffers");
} else {
- vp10_set_mb_mi(cm, width, height);
+ av1_set_mb_mi(cm, width, height);
}
- vp10_init_context_buffers(cm);
+ av1_init_context_buffers(cm);
cm->width = width;
cm->height = height;
}
@@ -1266,10 +1266,10 @@
}
}
-static void setup_frame_size(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int width, height;
BufferPool *const pool = cm->buffer_pool;
- vp10_read_frame_size(rb, &width, &height);
+ av1_read_frame_size(rb, &width, &height);
resize_context_buffers(cm, width, height);
setup_render_size(cm, rb);
@@ -1277,7 +1277,7 @@
if (aom_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -1306,7 +1306,7 @@
ref_yss == this_yss;
}
-static void setup_frame_size_with_refs(VP10_COMMON *cm,
+static void setup_frame_size_with_refs(AV1_COMMON *cm,
struct aom_read_bit_buffer *rb) {
int width, height;
int found = 0, i;
@@ -1327,7 +1327,7 @@
}
if (!found) {
- vp10_read_frame_size(rb, &width, &height);
+ av1_read_frame_size(rb, &width, &height);
#if CONFIG_MISC_FIXES
setup_render_size(cm, rb);
#endif
@@ -1367,7 +1367,7 @@
if (aom_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -1388,9 +1388,9 @@
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
-static void setup_tile_info(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_tile_info(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int min_log2_tile_cols, max_log2_tile_cols, max_ones;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
// columns
max_ones = max_log2_tile_cols - min_log2_tile_cols;
@@ -1470,7 +1470,7 @@
*data += size;
}
-static void get_tile_buffers(VP10Decoder *pbi, const uint8_t *data,
+static void get_tile_buffers(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end, int tile_cols,
int tile_rows,
TileBuffer (*tile_buffers)[1 << 6]) {
@@ -1488,9 +1488,9 @@
}
}
-static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = aom_get_worker_interface();
const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
const int tile_cols = 1 << cm->log2_tile_cols;
@@ -1504,7 +1504,7 @@
pbi->lf_worker.data1 == NULL) {
CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
aom_memalign(32, sizeof(LFWorkerData)));
- pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker;
+ pbi->lf_worker.hook = (VPxWorkerHook)av1_loop_filter_worker;
if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
aom_internal_error(&cm->error, VPX_CODEC_ERROR,
"Loop filter thread creation failed");
@@ -1515,7 +1515,7 @@
LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
// Be sure to sync as we might be resuming after a failed frame decode.
winterface->sync(&pbi->lf_worker);
- vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
+ av1_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
pbi->mb.plane);
}
@@ -1552,12 +1552,12 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
? &cm->counts
: NULL;
- vp10_zero(tile_data->dqcoeff);
- vp10_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
+ av1_zero(tile_data->dqcoeff);
+ av1_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
&tile_data->bit_reader, pbi->decrypt_cb,
pbi->decrypt_state);
- vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
+ av1_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
tile_data->xd.plane[0].color_index_map = tile_data->color_index_map[0];
tile_data->xd.plane[1].color_index_map = tile_data->color_index_map[1];
}
@@ -1565,16 +1565,16 @@
for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
TileInfo tile;
- vp10_tile_set_row(&tile, cm, tile_row);
+ av1_tile_set_row(&tile, cm, tile_row);
for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
mi_row += MI_BLOCK_SIZE) {
for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
const int col =
pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
tile_data = pbi->tile_data + tile_cols * tile_row + col;
- vp10_tile_set_col(&tile, tile_data->cm, col);
- vp10_zero(tile_data->xd.left_context);
- vp10_zero(tile_data->xd.left_seg_context);
+ av1_tile_set_col(&tile, tile_data->cm, col);
+ av1_zero(tile_data->xd.left_context);
+ av1_zero(tile_data->xd.left_seg_context);
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += MI_BLOCK_SIZE) {
decode_partition(pbi, &tile_data->xd, mi_row, mi_col,
@@ -1609,7 +1609,7 @@
// still be changed by the longest loopfilter of the next superblock
// row.
if (cm->frame_parallel_decode)
- vp10_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2);
+ av1_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2);
}
}
@@ -1623,11 +1623,11 @@
}
#if CONFIG_CLPF
if (cm->clpf && !cm->skip_loop_filter)
- vp10_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
+ av1_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
#endif
#if CONFIG_DERING
if (cm->dering_level && !cm->skip_loop_filter) {
- vp10_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
+ av1_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
}
#endif // CONFIG_DERING
@@ -1635,7 +1635,7 @@
tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
if (cm->frame_parallel_decode)
- vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX);
+ av1_frameworker_broadcast(pbi->cur_buf, INT_MAX);
return aom_reader_find_end(&tile_data->bit_reader);
}
@@ -1654,8 +1654,8 @@
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += MI_BLOCK_SIZE) {
- vp10_zero(tile_data->xd.left_context);
- vp10_zero(tile_data->xd.left_seg_context);
+ av1_zero(tile_data->xd.left_context);
+ av1_zero(tile_data->xd.left_seg_context);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE) {
decode_partition(tile_data->pbi, &tile_data->xd, mi_row, mi_col,
@@ -1672,9 +1672,9 @@
return (int)(buf2->size - buf1->size);
}
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = aom_get_worker_interface();
const uint8_t *bit_reader_end = NULL;
const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
@@ -1762,7 +1762,7 @@
for (i = 0; i < num_workers; ++i) {
TileWorkerData *const tile_data =
(TileWorkerData *)pbi->tile_workers[i].data1;
- vp10_zero(tile_data->counts);
+ av1_zero(tile_data->counts);
}
}
@@ -1782,13 +1782,13 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
? &tile_data->counts
: NULL;
- vp10_zero(tile_data->dqcoeff);
- vp10_tile_init(tile, cm, 0, buf->col);
- vp10_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
+ av1_zero(tile_data->dqcoeff);
+ av1_tile_init(tile, cm, 0, buf->col);
+ av1_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
&tile_data->bit_reader, pbi->decrypt_cb,
pbi->decrypt_state);
- vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
+ av1_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
tile_data->xd.plane[0].color_index_map = tile_data->color_index_map[0];
tile_data->xd.plane[1].color_index_map = tile_data->color_index_map[1];
@@ -1827,7 +1827,7 @@
for (i = 0; i < num_workers; ++i) {
TileWorkerData *const tile_data =
(TileWorkerData *)pbi->tile_workers[i].data1;
- vp10_accumulate_frame_counts(cm, &tile_data->counts, 1);
+ av1_accumulate_frame_counts(cm, &tile_data->counts, 1);
}
}
}
@@ -1836,20 +1836,20 @@
}
static void error_handler(void *data) {
- VP10_COMMON *const cm = (VP10_COMMON *)data;
+ AV1_COMMON *const cm = (AV1_COMMON *)data;
aom_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}
-static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
+static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
struct aom_read_bit_buffer *rb) {
if (cm->profile >= PROFILE_2) {
cm->bit_depth = aom_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 1;
#endif
} else {
cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 0;
#endif
}
@@ -1884,9 +1884,9 @@
}
}
-static size_t read_uncompressed_header(VP10Decoder *pbi,
+static size_t read_uncompressed_header(AV1Decoder *pbi,
struct aom_read_bit_buffer *rb) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = pool->frame_bufs;
@@ -1900,8 +1900,8 @@
aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame marker");
- cm->profile = vp10_read_profile(rb);
-#if CONFIG_VPX_HIGHBITDEPTH
+ cm->profile = av1_read_profile(rb);
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->profile >= MAX_PROFILES)
aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
"Unsupported bitstream profile");
@@ -1941,7 +1941,7 @@
cm->error_resilient_mode = aom_rb_read_bit(rb);
if (cm->frame_type == KEY_FRAME) {
- if (!vp10_read_sync_code(rb))
+ if (!av1_read_sync_code(rb))
aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
@@ -1990,7 +1990,7 @@
}
if (cm->intra_only) {
- if (!vp10_read_sync_code(rb))
+ if (!av1_read_sync_code(rb))
aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
#if CONFIG_MISC_FIXES
@@ -2000,14 +2000,14 @@
read_bitdepth_colorspace_sampling(cm, rb);
} else {
// NOTE: The intra-only frame header does not include the specification
- // of either the color format or color sub-sampling in profile 0. VP10
+ // of either the color format or color sub-sampling in profile 0. AV1
// specifies that the default color format should be YUV 4:2:0 in this
// case (normative).
cm->color_space = VPX_CS_BT_601;
cm->color_range = 0;
cm->subsampling_y = cm->subsampling_x = 1;
cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 0;
#endif
}
@@ -2037,20 +2037,20 @@
for (i = 0; i < REFS_PER_FRAME; ++i) {
RefBuffer *const ref_buf = &cm->frame_refs[i];
-#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, ref_buf->buf->y_crop_width,
ref_buf->buf->y_crop_height, cm->width, cm->height,
cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, ref_buf->buf->y_crop_width,
ref_buf->buf->y_crop_height, cm->width, cm->height);
#endif
}
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
#endif
get_frame_new_buffer(cm)->color_space = cm->color_space;
@@ -2081,7 +2081,7 @@
cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF;
}
- // This flag will be overridden by the call to vp10_setup_past_independence
+ // This flag will be overridden by the call to av1_setup_past_independence
// below, forcing the use of context 0 for those frame types.
cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
@@ -2110,7 +2110,7 @@
pbi->hold_ref_buf = 1;
if (frame_is_intra_only(cm) || cm->error_resilient_mode)
- vp10_setup_past_independence(cm);
+ av1_setup_past_independence(cm);
setup_loopfilter(&cm->lf, rb);
#if CONFIG_CLPF
@@ -2120,7 +2120,7 @@
setup_dering(cm, rb);
#endif
setup_quantization(cm, rb);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->bd = (int)cm->bit_depth;
#endif
@@ -2130,7 +2130,7 @@
int i;
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
- ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
: cm->base_qindex;
xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2160,20 +2160,20 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
for (k = 0; k < TX_TYPES - 1; ++k)
- vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
+ av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
- vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
+ av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
}
}
}
-static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
+static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
size_t partition_size) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
#if !CONFIG_MISC_FIXES
MACROBLOCKD *const xd = &pbi->mb;
#endif
@@ -2193,34 +2193,34 @@
read_coef_probs(fc, cm->tx_mode, &r);
for (k = 0; k < SKIP_CONTEXTS; ++k)
- vp10_diff_update_prob(&r, &fc->skip_probs[k]);
+ av1_diff_update_prob(&r, &fc->skip_probs[k]);
#if CONFIG_MISC_FIXES
if (cm->seg.enabled) {
if (cm->seg.temporal_update) {
for (k = 0; k < PREDICTION_PROBS; k++)
- vp10_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
+ av1_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
}
for (k = 0; k < MAX_SEGMENTS - 1; k++)
- vp10_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
+ av1_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
}
for (j = 0; j < INTRA_MODES; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
for (j = 0; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
#endif
if (frame_is_intra_only(cm)) {
- vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+ av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
#if CONFIG_MISC_FIXES
for (k = 0; k < INTRA_MODES; k++)
for (j = 0; j < INTRA_MODES; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
+ av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
#endif
} else {
nmv_context *const nmvc = &fc->nmvc;
@@ -2230,7 +2230,7 @@
if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
+ av1_diff_update_prob(&r, &fc->intra_inter_prob[i]);
#if !CONFIG_MISC_FIXES
cm->reference_mode = read_frame_reference_mode(cm, &r);
@@ -2241,12 +2241,12 @@
for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
#if !CONFIG_MISC_FIXES
for (j = 0; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
#endif
read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
@@ -2261,9 +2261,9 @@
#else // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
-static void debug_check_frame_counts(const VP10_COMMON *const cm) {
+static void debug_check_frame_counts(const AV1_COMMON *const cm) {
FRAME_COUNTS zero_counts;
- vp10_zero(zero_counts);
+ av1_zero(zero_counts);
assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
cm->error_resilient_mode);
assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
@@ -2298,13 +2298,13 @@
#endif // NDEBUG
static struct aom_read_bit_buffer *init_read_bit_buffer(
- VP10Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
- const uint8_t *data_end, uint8_t clear_data[MAX_VP10_HEADER_SIZE]) {
+ AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
+ const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]) {
rb->bit_offset = 0;
rb->error_handler = error_handler;
rb->error_handler_data = &pbi->common;
if (pbi->decrypt_cb) {
- const int n = (int)VPXMIN(MAX_VP10_HEADER_SIZE, data_end - data);
+ const int n = (int)VPXMIN(MAX_AV1_HEADER_SIZE, data_end - data);
pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
rb->bit_buffer = clear_data;
rb->bit_buffer_end = clear_data + n;
@@ -2317,32 +2317,32 @@
//------------------------------------------------------------------------------
-int vp10_read_sync_code(struct aom_read_bit_buffer *const rb) {
- return aom_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 &&
- aom_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 &&
- aom_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb) {
+ return aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_0 &&
+ aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_1 &&
+ aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_2;
}
-void vp10_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
int *height) {
*width = aom_rb_read_literal(rb, 16) + 1;
*height = aom_rb_read_literal(rb, 16) + 1;
}
-BITSTREAM_PROFILE vp10_read_profile(struct aom_read_bit_buffer *rb) {
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
int profile = aom_rb_read_bit(rb);
profile |= aom_rb_read_bit(rb) << 1;
if (profile > 2) profile += aom_rb_read_bit(rb);
return (BITSTREAM_PROFILE)profile;
}
-void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
+void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end, const uint8_t **p_data_end) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct aom_read_bit_buffer rb;
int context_updated = 0;
- uint8_t clear_data[MAX_VP10_HEADER_SIZE];
+ uint8_t clear_data[MAX_AV1_HEADER_SIZE];
const size_t first_partition_size = read_uncompressed_header(
pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
const int tile_rows = 1 << cm->log2_tile_rows;
@@ -2366,14 +2366,14 @@
cm->height == cm->last_height && !cm->last_intra_only &&
cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);
- vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
if (!cm->fc->initialized)
aom_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Uninitialized entropy context.");
- vp10_zero(cm->counts);
+ av1_zero(cm->counts);
xd->corrupted = 0;
new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
@@ -2382,7 +2382,7 @@
"Decode failed. Frame data header is corrupted.");
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- vp10_loop_filter_frame_init(cm, cm->lf.filter_level);
+ av1_loop_filter_frame_init(cm, cm->lf.filter_level);
}
// If encoded in frame parallel mode, frame context is ready after decoding
@@ -2395,13 +2395,13 @@
context_updated = 1;
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
frame_worker_data->frame_context_ready = 1;
// Signal the main thread that context is ready.
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
}
if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
@@ -2411,7 +2411,7 @@
if (!cm->skip_loop_filter) {
// If multiple threads are used to decode tiles, then we use those
// threads to do parallel loopfiltering.
- vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
+ av1_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
cm->lf.filter_level, 0, 0, pbi->tile_workers,
pbi->num_tile_workers, &pbi->lf_row_sync);
}
@@ -2425,17 +2425,17 @@
if (!xd->corrupted) {
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_adapt_coef_probs(cm);
+ av1_adapt_coef_probs(cm);
#if CONFIG_MISC_FIXES
- vp10_adapt_intra_frame_probs(cm);
+ av1_adapt_intra_frame_probs(cm);
#endif
if (!frame_is_intra_only(cm)) {
#if !CONFIG_MISC_FIXES
- vp10_adapt_intra_frame_probs(cm);
+ av1_adapt_intra_frame_probs(cm);
#endif
- vp10_adapt_inter_frame_probs(cm);
- vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+ av1_adapt_inter_frame_probs(cm);
+ av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
}
} else {
debug_check_frame_counts(cm);
diff --git a/av1/decoder/decodeframe.h b/av1/decoder/decodeframe.h
index a3908a1..f6e35a4 100644
--- a/av1/decoder/decodeframe.h
+++ b/av1/decoder/decodeframe.h
@@ -9,26 +9,26 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_DECODER_DECODEFRAME_H_
-#define VP10_DECODER_DECODEFRAME_H_
+#ifndef AV1_DECODER_DECODEFRAME_H_
+#define AV1_DECODER_DECODEFRAME_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Decoder;
+struct AV1Decoder;
struct aom_read_bit_buffer;
-int vp10_read_sync_code(struct aom_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb);
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
int *height);
-BITSTREAM_PROFILE vp10_read_profile(struct aom_read_bit_buffer *rb);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
-void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
+void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end, const uint8_t **p_data_end);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODEFRAME_H_
+#endif // AV1_DECODER_DECODEFRAME_H_
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index c4c550b..870dc5c 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -26,10 +26,10 @@
#include "aom_dsp/aom_dsp_common.h"
static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
- return (PREDICTION_MODE)aom_read_tree(r, vp10_intra_mode_tree, p);
+ return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p);
}
-static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int size_group) {
const PREDICTION_MODE y_mode =
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
@@ -38,7 +38,7 @@
return y_mode;
}
-static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_intra_mode_uv(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r,
PREDICTION_MODE y_mode) {
const PREDICTION_MODE uv_mode =
@@ -48,10 +48,10 @@
return uv_mode;
}
-static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_inter_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int ctx) {
const int mode =
- aom_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+ aom_read_tree(r, av1_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_mode[ctx][mode];
@@ -60,10 +60,10 @@
static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
- return aom_read_tree(r, vp10_segment_tree, segp->tree_probs);
+ return aom_read_tree(r, av1_segment_tree, segp->tree_probs);
}
-static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
+static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
TX_SIZE max_tx_size, aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
@@ -79,7 +79,7 @@
return (TX_SIZE)tx_size;
}
-static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, int allow_select,
+static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int allow_select,
aom_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
@@ -91,7 +91,7 @@
return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
}
-static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
+static int dec_get_segment_id(const AV1_COMMON *cm, const uint8_t *segment_ids,
int mi_offset, int x_mis, int y_mis) {
int x, y, segment_id = INT_MAX;
@@ -104,7 +104,7 @@
return segment_id;
}
-static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+static void set_segment_id(AV1_COMMON *cm, int mi_offset, int x_mis, int y_mis,
int segment_id) {
int x, y;
@@ -115,7 +115,7 @@
cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
}
-static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_intra_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
int mi_offset, int x_mis, int y_mis,
aom_reader *r) {
struct segmentation *const seg = &cm->seg;
@@ -143,7 +143,7 @@
return segment_id;
}
-static void copy_segment_id(const VP10_COMMON *cm,
+static void copy_segment_id(const AV1_COMMON *cm,
const uint8_t *last_segment_ids,
uint8_t *current_segment_ids, int mi_offset,
int x_mis, int y_mis) {
@@ -156,7 +156,7 @@
: 0;
}
-static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
int mi_row, int mi_col, aom_reader *r) {
struct segmentation *const seg = &cm->seg;
#if CONFIG_MISC_FIXES
@@ -189,7 +189,7 @@
}
if (seg->temporal_update) {
- const int ctx = vp10_get_pred_context_seg_id(xd);
+ const int ctx = av1_get_pred_context_seg_id(xd);
const aom_prob pred_prob = segp->pred_probs[ctx];
mbmi->seg_id_predicted = aom_read(r, pred_prob);
#if CONFIG_MISC_FIXES
@@ -213,12 +213,12 @@
return segment_id;
}
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
- const int ctx = vp10_get_skip_context(xd);
+ const int ctx = av1_get_skip_context(xd);
const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
@@ -226,7 +226,7 @@
}
}
-static void read_intra_frame_mode_info(VP10_COMMON *const cm,
+static void read_intra_frame_mode_info(AV1_COMMON *const cm,
MACROBLOCKD *const xd, int mi_row,
int mi_col, aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
@@ -280,7 +280,7 @@
FRAME_COUNTS *counts = xd->counts;
TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
mbmi->tx_type =
- aom_read_tree(r, vp10_ext_tx_tree,
+ aom_read_tree(r, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -293,12 +293,12 @@
int usehp) {
int mag, d, fr, hp;
const int sign = aom_read(r, mvcomp->sign);
- const int mv_class = aom_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
+ const int mv_class = aom_read_tree(r, av1_mv_class_tree, mvcomp->classes);
const int class0 = mv_class == MV_CLASS_0;
// Integer part
if (class0) {
- d = aom_read_tree(r, vp10_mv_class0_tree, mvcomp->class0);
+ d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0);
mag = 0;
} else {
int i;
@@ -310,7 +310,7 @@
}
// Fractional part
- fr = aom_read_tree(r, vp10_mv_fp_tree,
+ fr = aom_read_tree(r, av1_mv_fp_tree,
class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
// High precision part (if hp is not used, the default value of the hp is 1)
@@ -325,8 +325,8 @@
const nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
const MV_JOINT_TYPE joint_type =
- (MV_JOINT_TYPE)aom_read_tree(r, vp10_mv_joint_tree, ctx->joints);
- const int use_hp = allow_hp && vp10_use_mv_hp(ref);
+ (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
+ const int use_hp = allow_hp && av1_use_mv_hp(ref);
MV diff = { 0, 0 };
if (mv_joint_vertical(joint_type))
@@ -335,17 +335,17 @@
if (mv_joint_horizontal(joint_type))
diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
- vp10_inc_mv(&diff, counts, use_hp);
+ av1_inc_mv(&diff, counts, use_hp);
mv->row = ref->row + diff.row;
mv->col = ref->col + diff.col;
}
-static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
+static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
const MACROBLOCKD *xd,
aom_reader *r) {
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- const int ctx = vp10_get_reference_mode_context(cm, xd);
+ const int ctx = av1_get_reference_mode_context(cm, xd);
const REFERENCE_MODE mode =
(REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
@@ -357,7 +357,7 @@
}
// Read the referncence frame
-static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
aom_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
FRAME_CONTEXT *const fc = cm->fc;
@@ -372,17 +372,17 @@
// FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
if (mode == COMPOUND_REFERENCE) {
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
- const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
+ const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
const int bit = aom_read(r, fc->comp_ref_prob[ctx]);
if (counts) ++counts->comp_ref[ctx][bit];
ref_frame[idx] = cm->comp_fixed_ref;
ref_frame[!idx] = cm->comp_var_ref[bit];
} else if (mode == SINGLE_REFERENCE) {
- const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
+ const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
- const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
+ const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
@@ -397,18 +397,18 @@
}
}
-static INLINE INTERP_FILTER read_switchable_interp_filter(VP10_COMMON *const cm,
+static INLINE INTERP_FILTER read_switchable_interp_filter(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
aom_reader *r) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
const INTERP_FILTER type = (INTERP_FILTER)aom_read_tree(
- r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+ r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->switchable_interp[ctx][type];
return type;
}
-static void read_intra_block_mode_info(VP10_COMMON *const cm,
+static void read_intra_block_mode_info(AV1_COMMON *const cm,
MACROBLOCKD *const xd, MODE_INFO *mi,
aom_reader *r) {
MB_MODE_INFO *const mbmi = &mi->mbmi;
@@ -446,7 +446,7 @@
mv->col < MV_UPP;
}
-static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
PREDICTION_MODE mode, int_mv mv[2],
int_mv ref_mv[2], int_mv nearest_mv[2],
int_mv near_mv[2], int is_compound, int allow_hp,
@@ -485,12 +485,12 @@
return ret;
}
-static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
int segment_id, aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
} else {
- const int ctx = vp10_get_intra_inter_context(xd);
+ const int ctx = av1_get_intra_inter_context(xd);
const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->intra_inter[ctx][is_inter];
@@ -499,16 +499,16 @@
}
static void fpm_sync(void *const data, int mi_row) {
- VP10Decoder *const pbi = (VP10Decoder *)data;
- vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
+ AV1Decoder *const pbi = (AV1Decoder *)data;
+ av1_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
mi_row << MI_BLOCK_SIZE_LOG2);
}
-static void read_inter_block_mode_info(VP10Decoder *const pbi,
+static void read_inter_block_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi, int mi_row,
int mi_col, aom_reader *r) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
@@ -525,11 +525,11 @@
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
- if ((!vp10_is_valid_scale(&ref_buf->sf)))
+ if ((!av1_is_valid_scale(&ref_buf->sf)))
aom_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
- vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
- vp10_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame], mi_row, mi_col,
+ av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
+ av1_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame], mi_row, mi_col,
fpm_sync, (void *)pbi, inter_mode_ctx);
}
@@ -548,7 +548,7 @@
if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
for (ref = 0; ref < 1 + is_compound; ++ref) {
- vp10_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
+ av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
&nearestmv[ref], &nearmv[ref]);
}
}
@@ -572,7 +572,7 @@
if (b_mode == NEARESTMV || b_mode == NEARMV) {
uint8_t dummy_mode_ctx[MAX_REF_FRAMES];
for (ref = 0; ref < 1 + is_compound; ++ref)
- vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
+ av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
&nearest_sub8x8[ref],
&near_sub8x8[ref], dummy_mode_ctx);
}
@@ -601,10 +601,10 @@
}
}
-static void read_inter_frame_mode_info(VP10Decoder *const pbi,
+static void read_inter_frame_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd, int mi_row,
int mi_col, aom_reader *r) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block;
@@ -625,13 +625,13 @@
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
- mbmi->tx_type = aom_read_tree(r, vp10_ext_tx_tree,
+ mbmi->tx_type = aom_read_tree(r, av1_ext_tx_tree,
cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
} else {
const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
mbmi->tx_type =
- aom_read_tree(r, vp10_ext_tx_tree,
+ aom_read_tree(r, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -641,9 +641,9 @@
}
}
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
int mi_col, aom_reader *r, int x_mis, int y_mis) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
diff --git a/av1/decoder/decodemv.h b/av1/decoder/decodemv.h
index 22b8eea..6ae3df4 100644
--- a/av1/decoder/decodemv.h
+++ b/av1/decoder/decodemv.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_DECODER_DECODEMV_H_
-#define VP10_DECODER_DECODEMV_H_
+#ifndef AV1_DECODER_DECODEMV_H_
+#define AV1_DECODER_DECODEMV_H_
#include "aom_dsp/bitreader.h"
@@ -20,11 +20,11 @@
extern "C" {
#endif
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
int mi_col, aom_reader *r, int x_mis, int y_mis);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODEMV_H_
+#endif // AV1_DECODER_DECODEMV_H_
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index 97b96e0..80d14bb 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -41,19 +41,19 @@
av1_rtcd();
aom_dsp_rtcd();
aom_scale_rtcd();
- vp10_init_intra_predictors();
+ av1_init_intra_predictors();
init_done = 1;
}
}
-static void vp10_dec_setup_mi(VP10_COMMON *cm) {
+static void av1_dec_setup_mi(AV1_COMMON *cm) {
cm->mi = cm->mip + cm->mi_stride + 1;
cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
memset(cm->mi_grid_base, 0,
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
-static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
+static int av1_dec_alloc_mi(AV1_COMMON *cm, int mi_size) {
cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
cm->mi_alloc_size = mi_size;
@@ -62,24 +62,24 @@
return 0;
}
-static void vp10_dec_free_mi(VP10_COMMON *cm) {
+static void av1_dec_free_mi(AV1_COMMON *cm) {
aom_free(cm->mip);
cm->mip = NULL;
aom_free(cm->mi_grid_base);
cm->mi_grid_base = NULL;
}
-VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
- VP10Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
- VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
+AV1Decoder *av1_decoder_create(BufferPool *const pool) {
+ AV1Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
+ AV1_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
if (!cm) return NULL;
- vp10_zero(*pbi);
+ av1_zero(*pbi);
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
- vp10_decoder_remove(pbi);
+ av1_decoder_remove(pbi);
return NULL;
}
@@ -104,11 +104,11 @@
cm->bit_depth = VPX_BITS_8;
cm->dequant_bit_depth = VPX_BITS_8;
- cm->alloc_mi = vp10_dec_alloc_mi;
- cm->free_mi = vp10_dec_free_mi;
- cm->setup_mi = vp10_dec_setup_mi;
+ cm->alloc_mi = av1_dec_alloc_mi;
+ cm->free_mi = av1_dec_free_mi;
+ cm->setup_mi = av1_dec_setup_mi;
- vp10_loop_filter_init(cm);
+ av1_loop_filter_init(cm);
#if CONFIG_AOM_QM
aom_qm_init(cm);
@@ -121,7 +121,7 @@
return pbi;
}
-void vp10_decoder_remove(VP10Decoder *pbi) {
+void av1_decoder_remove(AV1Decoder *pbi) {
int i;
if (!pbi) return;
@@ -138,7 +138,7 @@
aom_free(pbi->tile_workers);
if (pbi->num_tile_workers > 0) {
- vp10_loop_filter_dealloc(&pbi->lf_row_sync);
+ av1_loop_filter_dealloc(&pbi->lf_row_sync);
}
aom_free(pbi);
@@ -150,15 +150,15 @@
a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
-aom_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
+aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi,
VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
- VP10_COMMON *cm = &pbi->common;
+ AV1_COMMON *cm = &pbi->common;
/* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
* encoder is using the frame buffers for. This is just a stub to keep the
* aomenc --test-decode functionality working, and will be replaced in a
- * later commit that adds VP10-specific controls for this functionality.
+ * later commit that adds AV1-specific controls for this functionality.
*/
if (ref_frame_flag == VPX_LAST_FLAG) {
const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
@@ -179,7 +179,7 @@
return cm->error.error_code;
}
-aom_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
RefBuffer *ref_buf = NULL;
@@ -188,7 +188,7 @@
// TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
// encoder is using the frame buffers for. This is just a stub to keep the
// aomenc --test-decode functionality working, and will be replaced in a
- // later commit that adds VP10-specific controls for this functionality.
+ // later commit that adds AV1-specific controls for this functionality.
if (ref_frame_flag == VPX_LAST_FLAG) {
ref_buf = &cm->frame_refs[0];
} else if (ref_frame_flag == VPX_GOLD_FLAG) {
@@ -224,9 +224,9 @@
}
/* If any buffer updating is signaled it should be done here. */
-static void swap_frame_buffers(VP10Decoder *pbi) {
+static void swap_frame_buffers(AV1Decoder *pbi) {
int ref_index = 0, mask;
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
@@ -265,9 +265,9 @@
cm->frame_refs[ref_index].idx = -1;
}
-int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
+int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
const uint8_t **psource) {
- VP10_COMMON *volatile const cm = &pbi->common;
+ AV1_COMMON *volatile const cm = &pbi->common;
BufferPool *volatile const pool = cm->buffer_pool;
RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
const uint8_t *source = *psource;
@@ -307,13 +307,13 @@
pbi->hold_ref_buf = 0;
if (cm->frame_parallel_decode) {
VPxWorker *const worker = pbi->frame_worker_owner;
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
// Reset decoding progress.
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_unlock_stats(worker);
} else {
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
}
@@ -364,7 +364,7 @@
}
cm->error.setjmp = 1;
- vp10_decode_frame(pbi, source, source + size, psource);
+ av1_decode_frame(pbi, source, source + size, psource);
swap_frame_buffers(pbi);
@@ -374,7 +374,7 @@
cm->last_show_frame = cm->show_frame;
cm->prev_frame = cm->cur_frame;
if (cm->seg.enabled && !cm->frame_parallel_decode)
- vp10_swap_current_and_last_seg_map(cm);
+ av1_swap_current_and_last_seg_map(cm);
}
// Update progress in frame parallel decode.
@@ -383,15 +383,15 @@
// be accessing this buffer.
VPxWorker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
if (cm->show_frame) {
cm->current_video_frame++;
}
frame_worker_data->frame_decoded = 1;
frame_worker_data->frame_context_ready = 1;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
} else {
cm->last_width = cm->width;
cm->last_height = cm->height;
@@ -404,8 +404,8 @@
return retcode;
}
-int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
- VP10_COMMON *const cm = &pbi->common;
+int av1_get_raw_frame(AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
+ AV1_COMMON *const cm = &pbi->common;
int ret = -1;
if (pbi->ready_for_new_data == 1) return ret;
@@ -423,7 +423,7 @@
return ret;
}
-aom_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
uint32_t sizes[8], int *count,
aom_decrypt_cb decrypt_cb,
void *decrypt_state) {
diff --git a/av1/decoder/decoder.h b/av1/decoder/decoder.h
index e2b9738..7cf8abb 100644
--- a/av1/decoder/decoder.h
+++ b/av1/decoder/decoder.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_DECODER_DECODER_H_
-#define VP10_DECODER_DECODER_H_
+#ifndef AV1_DECODER_DECODER_H_
+#define AV1_DECODER_DECODER_H_
#include "./aom_config.h"
@@ -29,7 +29,7 @@
// TODO(hkuang): combine this with TileWorkerData.
typedef struct TileData {
- VP10_COMMON *cm;
+ AV1_COMMON *cm;
aom_reader bit_reader;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
@@ -38,7 +38,7 @@
} TileData;
typedef struct TileWorkerData {
- struct VP10Decoder *pbi;
+ struct AV1Decoder *pbi;
aom_reader bit_reader;
FRAME_COUNTS counts;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
@@ -48,10 +48,10 @@
struct aom_internal_error_info error_info;
} TileWorkerData;
-typedef struct VP10Decoder {
+typedef struct AV1Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
- DECLARE_ALIGNED(16, VP10_COMMON, common);
+ DECLARE_ALIGNED(16, AV1_COMMON, common);
int ready_for_new_data;
@@ -71,7 +71,7 @@
TileData *tile_data;
int total_tiles;
- VP10LfSync lf_row_sync;
+ AV1LfSync lf_row_sync;
aom_decrypt_cb decrypt_cb;
void *decrypt_state;
@@ -80,18 +80,18 @@
int inv_tile_order;
int need_resync; // wait for key/intra-only frame.
int hold_ref_buf; // hold the reference buffer.
-} VP10Decoder;
+} AV1Decoder;
-int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
+int av1_receive_compressed_data(struct AV1Decoder *pbi, size_t size,
const uint8_t **dest);
-int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
+int av1_get_raw_frame(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd);
-aom_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
+aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi,
VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-aom_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
@@ -107,14 +107,14 @@
// This function is exposed for use in tests, as well as the inlined function
// "read_marker".
-aom_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
uint32_t sizes[8], int *count,
aom_decrypt_cb decrypt_cb,
void *decrypt_state);
-struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
+struct AV1Decoder *av1_decoder_create(BufferPool *const pool);
-void vp10_decoder_remove(struct VP10Decoder *pbi);
+void av1_decoder_remove(struct AV1Decoder *pbi);
static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
BufferPool *const pool) {
@@ -135,4 +135,4 @@
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODER_H_
+#endif // AV1_DECODER_DECODER_H_
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index b0e083e..99ee02d 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -86,38 +86,38 @@
eob_branch_count = counts->eob_branch[tx_size][type][ref];
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->bd > VPX_BITS_8) {
if (xd->bd == VPX_BITS_10) {
- cat1_prob = vp10_cat1_prob_high10;
- cat2_prob = vp10_cat2_prob_high10;
- cat3_prob = vp10_cat3_prob_high10;
- cat4_prob = vp10_cat4_prob_high10;
- cat5_prob = vp10_cat5_prob_high10;
- cat6_prob = vp10_cat6_prob_high10;
+ cat1_prob = av1_cat1_prob_high10;
+ cat2_prob = av1_cat2_prob_high10;
+ cat3_prob = av1_cat3_prob_high10;
+ cat4_prob = av1_cat4_prob_high10;
+ cat5_prob = av1_cat5_prob_high10;
+ cat6_prob = av1_cat6_prob_high10;
} else {
- cat1_prob = vp10_cat1_prob_high12;
- cat2_prob = vp10_cat2_prob_high12;
- cat3_prob = vp10_cat3_prob_high12;
- cat4_prob = vp10_cat4_prob_high12;
- cat5_prob = vp10_cat5_prob_high12;
- cat6_prob = vp10_cat6_prob_high12;
+ cat1_prob = av1_cat1_prob_high12;
+ cat2_prob = av1_cat2_prob_high12;
+ cat3_prob = av1_cat3_prob_high12;
+ cat4_prob = av1_cat4_prob_high12;
+ cat5_prob = av1_cat5_prob_high12;
+ cat6_prob = av1_cat6_prob_high12;
}
} else {
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
}
#else
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
#endif
while (c < max_eob) {
@@ -147,8 +147,8 @@
val = 1;
} else {
INCREMENT_COUNT(TWO_TOKEN);
- token = aom_read_tree(r, vp10_coef_con_tree,
- vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
+ token = aom_read_tree(r, av1_coef_con_tree,
+ av1_pareto8_full[prob[PIVOT_NODE] - 1]);
switch (token) {
case TWO_TOKEN:
case THREE_TOKEN:
@@ -175,7 +175,7 @@
const int skip_bits = 0;
#endif
const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (xd->bd) {
case VPX_BITS_8:
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
@@ -201,15 +201,15 @@
#endif
v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dqcoeff[scan[c]] = highbd_check_range((aom_read_bit(r) ? -v : v), xd->bd);
#else
dqcoeff[scan[c]] = check_range(aom_read_bit(r) ? -v : v);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else
dqcoeff[scan[c]] = aom_read_bit(r) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
++c;
ctx = get_coef_context(nb, token_cache, c);
dqv = dq[1];
@@ -218,8 +218,8 @@
return c;
}
-// TODO(slavarnway): Decode version of vp10_set_context. Modify
-// vp10_set_context
+// TODO(slavarnway): Decode version of av1_set_context. Modify
+// av1_set_context
// after testing is complete, then delete this version.
static void dec_set_contexts(const MACROBLOCKD *xd,
struct macroblockd_plane *pd, TX_SIZE tx_size,
@@ -258,7 +258,7 @@
}
}
-int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+int av1_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
int x, int y, TX_SIZE tx_size, aom_reader *r,
int seg_id) {
struct macroblockd_plane *const pd = &xd->plane[plane];
diff --git a/av1/decoder/detokenize.h b/av1/decoder/detokenize.h
index 1abb5d5..2f21334 100644
--- a/av1/decoder/detokenize.h
+++ b/av1/decoder/detokenize.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_DECODER_DETOKENIZE_H_
-#define VP10_DECODER_DETOKENIZE_H_
+#ifndef AV1_DECODER_DETOKENIZE_H_
+#define AV1_DECODER_DETOKENIZE_H_
#include "aom_dsp/bitreader.h"
#include "av1/decoder/decoder.h"
@@ -20,7 +20,7 @@
extern "C" {
#endif
-int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+int av1_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
int x, int y, TX_SIZE tx_size, aom_reader *r,
int seg_id);
@@ -28,4 +28,4 @@
} // extern "C"
#endif
-#endif // VP10_DECODER_DETOKENIZE_H_
+#endif // AV1_DECODER_DETOKENIZE_H_
diff --git a/av1/decoder/dsubexp.c b/av1/decoder/dsubexp.c
index ac7aed5..5d941c0 100644
--- a/av1/decoder/dsubexp.c
+++ b/av1/decoder/dsubexp.c
@@ -69,7 +69,7 @@
return decode_uniform(r) + 64;
}
-void vp10_diff_update_prob(aom_reader *r, aom_prob *p) {
+void av1_diff_update_prob(aom_reader *r, aom_prob *p) {
if (aom_read(r, DIFF_UPDATE_PROB)) {
const int delp = decode_term_subexp(r);
*p = (aom_prob)inv_remap_prob(delp, *p);
diff --git a/av1/decoder/dsubexp.h b/av1/decoder/dsubexp.h
index 779f345..acc38ba 100644
--- a/av1/decoder/dsubexp.h
+++ b/av1/decoder/dsubexp.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_DECODER_DSUBEXP_H_
-#define VP10_DECODER_DSUBEXP_H_
+#ifndef AV1_DECODER_DSUBEXP_H_
+#define AV1_DECODER_DSUBEXP_H_
#include "aom_dsp/bitreader.h"
@@ -18,10 +18,10 @@
extern "C" {
#endif
-void vp10_diff_update_prob(aom_reader *r, aom_prob *p);
+void av1_diff_update_prob(aom_reader *r, aom_prob *p);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DSUBEXP_H_
+#endif // AV1_DECODER_DSUBEXP_H_
diff --git a/av1/decoder/dthread.c b/av1/decoder/dthread.c
index 6ef92a4..25f8dca 100644
--- a/av1/decoder/dthread.c
+++ b/av1/decoder/dthread.c
@@ -18,7 +18,7 @@
// #define DEBUG_THREAD
// TODO(hkuang): Clean up all the #ifdef in this file.
-void vp10_frameworker_lock_stats(VPxWorker *const worker) {
+void av1_frameworker_lock_stats(VPxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_lock(&worker_data->stats_mutex);
@@ -27,7 +27,7 @@
#endif
}
-void vp10_frameworker_unlock_stats(VPxWorker *const worker) {
+void av1_frameworker_unlock_stats(VPxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_unlock(&worker_data->stats_mutex);
@@ -36,7 +36,7 @@
#endif
}
-void vp10_frameworker_signal_stats(VPxWorker *const worker) {
+void av1_frameworker_signal_stats(VPxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
@@ -60,7 +60,7 @@
#endif
// TODO(hkuang): Remove worker parameter as it is only used in debug code.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
+void av1_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
int row) {
#if CONFIG_MULTITHREAD
if (!ref_buf) return;
@@ -77,7 +77,7 @@
VPxWorker *const ref_worker = ref_buf->frame_worker_owner;
FrameWorkerData *const ref_worker_data =
(FrameWorkerData *)ref_worker->data1;
- const VP10Decoder *const pbi = ref_worker_data->pbi;
+ const AV1Decoder *const pbi = ref_worker_data->pbi;
#ifdef DEBUG_THREAD
{
@@ -88,7 +88,7 @@
}
#endif
- vp10_frameworker_lock_stats(ref_worker);
+ av1_frameworker_lock_stats(ref_worker);
while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
ref_buf->buf.corrupted != 1) {
pthread_cond_wait(&ref_worker_data->stats_cond,
@@ -97,12 +97,12 @@
if (ref_buf->buf.corrupted == 1) {
FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
- vp10_frameworker_unlock_stats(ref_worker);
+ av1_frameworker_unlock_stats(ref_worker);
aom_internal_error(&worker_data->pbi->common.error,
VPX_CODEC_CORRUPT_FRAME,
"Worker %p failed to decode frame", worker);
}
- vp10_frameworker_unlock_stats(ref_worker);
+ av1_frameworker_unlock_stats(ref_worker);
}
#else
(void)worker;
@@ -112,7 +112,7 @@
#endif // CONFIG_MULTITHREAD
}
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row) {
#if CONFIG_MULTITHREAD
VPxWorker *worker = buf->frame_worker_owner;
@@ -124,27 +124,27 @@
}
#endif
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
buf->row = row;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
#else
(void)buf;
(void)row;
#endif // CONFIG_MULTITHREAD
}
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
+void av1_frameworker_copy_context(VPxWorker *const dst_worker,
VPxWorker *const src_worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
- VP10_COMMON *const src_cm = &src_worker_data->pbi->common;
- VP10_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+ AV1_COMMON *const src_cm = &src_worker_data->pbi->common;
+ AV1_COMMON *const dst_cm = &dst_worker_data->pbi->common;
int i;
// Wait until source frame's context is ready.
- vp10_frameworker_lock_stats(src_worker);
+ av1_frameworker_lock_stats(src_worker);
while (!src_worker_data->frame_context_ready) {
pthread_cond_wait(&src_worker_data->stats_cond,
&src_worker_data->stats_mutex);
@@ -154,10 +154,10 @@
? src_cm->current_frame_seg_map
: src_cm->last_frame_seg_map;
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
- vp10_frameworker_unlock_stats(src_worker);
+ av1_frameworker_unlock_stats(src_worker);
dst_cm->bit_depth = src_cm->bit_depth;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
#endif
dst_cm->prev_frame =
diff --git a/av1/decoder/dthread.h b/av1/decoder/dthread.h
index a40c0df..dcaea15 100644
--- a/av1/decoder/dthread.h
+++ b/av1/decoder/dthread.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_DECODER_DTHREAD_H_
-#define VP10_DECODER_DTHREAD_H_
+#ifndef AV1_DECODER_DTHREAD_H_
+#define AV1_DECODER_DTHREAD_H_
#include "./aom_config.h"
#include "aom_util/aom_thread.h"
@@ -20,13 +20,13 @@
extern "C" {
#endif
-struct VP10Common;
-struct VP10Decoder;
+struct AV1Common;
+struct AV1Decoder;
// WorkerData for the FrameWorker thread. It contains all the information of
// the worker and decode structures for decoding a frame.
typedef struct FrameWorkerData {
- struct VP10Decoder *pbi;
+ struct AV1Decoder *pbi;
const uint8_t *data;
const uint8_t *data_end;
size_t data_size;
@@ -49,27 +49,27 @@
int frame_decoded; // Finished decoding current frame.
} FrameWorkerData;
-void vp10_frameworker_lock_stats(VPxWorker *const worker);
-void vp10_frameworker_unlock_stats(VPxWorker *const worker);
-void vp10_frameworker_signal_stats(VPxWorker *const worker);
+void av1_frameworker_lock_stats(VPxWorker *const worker);
+void av1_frameworker_unlock_stats(VPxWorker *const worker);
+void av1_frameworker_signal_stats(VPxWorker *const worker);
// Wait until ref_buf has been decoded to row in real pixel unit.
// Note: worker may already finish decoding ref_buf and release it in order to
// start decoding next frame. So need to check whether worker is still decoding
// ref_buf.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
+void av1_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
int row);
// FrameWorker broadcasts its decoding progress so other workers that are
// waiting on it can resume decoding.
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row);
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row);
// Copy necessary decoding context from src worker to dst worker.
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
+void av1_frameworker_copy_context(VPxWorker *const dst_worker,
VPxWorker *const src_worker);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DTHREAD_H_
+#endif // AV1_DECODER_DTHREAD_H_
diff --git a/av1/encoder/aq_complexity.c b/av1/encoder/aq_complexity.c
index 0f632b8..2fc004e 100644
--- a/av1/encoder/aq_complexity.c
+++ b/av1/encoder/aq_complexity.c
@@ -43,12 +43,12 @@
static int get_aq_c_strength(int q_index, aom_bit_depth_t bit_depth) {
// Approximate base quatizer (truncated to int)
- const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
+ const int base_quant = av1_ac_quant(q_index, 0, bit_depth) / 4;
return (base_quant > 10) + (base_quant > 25);
}
-void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_setup_in_frame_q_adj(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
struct segmentation *const seg = &cm->seg;
// Make SURE use of floating point in this function is safe.
@@ -63,22 +63,22 @@
// Clear down the segment map.
memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
// Segmentation only makes sense if the target bits per SB is above a
// threshold. Below this the overheads will usually outweigh any benefit.
if (cpi->rc.sb64_target_rate < 256) {
- vp10_disable_segmentation(seg);
+ av1_disable_segmentation(seg);
return;
}
- vp10_enable_segmentation(seg);
+ av1_enable_segmentation(seg);
// Select delta coding method.
seg->abs_delta = SEGMENT_DELTADATA;
// Default segment "Q" feature is disabled so it defaults to the baseline Q.
- vp10_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
+ av1_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
// Use some of the segments for in frame Q adjustment.
for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
@@ -86,7 +86,7 @@
if (segment == DEFAULT_AQ2_SEG) continue;
- qindex_delta = vp10_compute_qdelta_by_rate(
+ qindex_delta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, cm->base_qindex,
aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
@@ -98,8 +98,8 @@
qindex_delta = -cm->base_qindex + 1;
}
if ((cm->base_qindex + qindex_delta) > 0) {
- vp10_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
- vp10_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+ av1_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
+ av1_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
}
}
}
@@ -111,9 +111,9 @@
// Select a segment for the current block.
// The choice of segment for a block depends on the ratio of the projected
// bits for the block vs a target average and its spatial complexity.
-void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
int mi_row, int mi_col, int projected_rate) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
@@ -140,8 +140,8 @@
MIN_DEFAULT_LV_THRESH)
: DEFAULT_LV_THRESH;
- vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
- logvar = vp10_log_block_var(cpi, mb, bs);
+ av1_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
+ logvar = av1_log_block_var(cpi, mb, bs);
segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
for (i = 0; i < AQ_C_SEGMENTS; ++i) {
diff --git a/av1/encoder/aq_complexity.h b/av1/encoder/aq_complexity.h
index 7ba22c8..05658d4 100644
--- a/av1/encoder/aq_complexity.h
+++ b/av1/encoder/aq_complexity.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
-#define VP10_ENCODER_AQ_COMPLEXITY_H_
+#ifndef AV1_ENCODER_AQ_COMPLEXITY_H_
+#define AV1_ENCODER_AQ_COMPLEXITY_H_
#ifdef __cplusplus
extern "C" {
@@ -18,20 +18,20 @@
#include "av1/common/enums.h"
-struct VP10_COMP;
+struct AV1_COMP;
struct macroblock;
// Select a segment for the current Block.
-void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
+void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
BLOCK_SIZE bs, int mi_row, int mi_col,
int projected_rate);
// This function sets up a set of segments with delta Q values around
// the baseline frame quantizer.
-void vp10_setup_in_frame_q_adj(struct VP10_COMP *cpi);
+void av1_setup_in_frame_q_adj(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_AQ_COMPLEXITY_H_
+#endif // AV1_ENCODER_AQ_COMPLEXITY_H_
diff --git a/av1/encoder/aq_cyclicrefresh.c b/av1/encoder/aq_cyclicrefresh.c
index 3d48c14..7f7a5b3 100644
--- a/av1/encoder/aq_cyclicrefresh.c
+++ b/av1/encoder/aq_cyclicrefresh.c
@@ -57,7 +57,7 @@
int qindex_delta[3];
};
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
size_t last_coded_q_map_size;
CYCLIC_REFRESH *const cr = aom_calloc(1, sizeof(*cr));
if (cr == NULL) return NULL;
@@ -79,14 +79,14 @@
return cr;
}
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
aom_free(cr->map);
aom_free(cr->last_coded_q_map);
aom_free(cr);
}
// Check if we should turn off cyclic refresh based on bitrate condition.
-static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
+static int apply_cyclic_refresh_bitrate(const AV1_COMMON *cm,
const RATE_CONTROL *rc) {
// Turn off cyclic refresh if bits available per frame is not sufficiently
// larger than bit cost of segmentation. Segment map bit cost should scale
@@ -134,10 +134,10 @@
}
// Compute delta-q for the segment.
-static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
+static int compute_deltaq(const AV1_COMP *cpi, int q, double rate_factor) {
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const RATE_CONTROL *const rc = &cpi->rc;
- int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+ int deltaq = av1_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
rate_factor, cpi->common.bit_depth);
if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
deltaq = -cr->max_qdelta_perc * q / 100;
@@ -149,9 +149,9 @@
// from non-base segment. For now ignore effect of multiple segments
// (with different delta-q). Note this function is called in the postencode
// (called from rc_update_rate_correction_factors()).
-int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
+int av1_cyclic_refresh_estimate_bits_at_q(const AV1_COMP *cpi,
double correction_factor) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int estimated_bits;
int mbs = cm->MBs;
@@ -163,14 +163,14 @@
// Take segment weighted average for estimated bits.
estimated_bits =
(int)((1.0 - weight_segment1 - weight_segment2) *
- vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+ av1_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
correction_factor, cm->bit_depth) +
weight_segment1 *
- vp10_estimate_bits_at_q(cm->frame_type,
+ av1_estimate_bits_at_q(cm->frame_type,
cm->base_qindex + cr->qindex_delta[1],
mbs, correction_factor, cm->bit_depth) +
weight_segment2 *
- vp10_estimate_bits_at_q(cm->frame_type,
+ av1_estimate_bits_at_q(cm->frame_type,
cm->base_qindex + cr->qindex_delta[2],
mbs, correction_factor, cm->bit_depth));
return estimated_bits;
@@ -181,9 +181,9 @@
// rc_regulate_q() to set the base qp index.
// Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
// to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
-int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
+int av1_cyclic_refresh_rc_bits_per_mb(const AV1_COMP *cpi, int i,
double correction_factor) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int bits_per_mb;
int num8x8bl = cm->MBs << 2;
@@ -198,10 +198,10 @@
int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
// Take segment weighted average for bits per mb.
bits_per_mb =
- (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
+ (int)((1.0 - weight_segment) * av1_rc_bits_per_mb(cm->frame_type, i,
correction_factor,
cm->bit_depth) +
- weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
+ weight_segment * av1_rc_bits_per_mb(cm->frame_type, i + deltaq,
correction_factor,
cm->bit_depth));
return bits_per_mb;
@@ -210,11 +210,11 @@
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
MB_MODE_INFO *const mbmi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int bh = num_8x8_blocks_high_lookup[bsize];
@@ -278,8 +278,8 @@
}
// Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_postencode(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
unsigned char *const seg_map = cpi->segmentation_map;
int mi_row, mi_col;
@@ -298,7 +298,7 @@
}
// Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_set_golden_update(AV1_COMP *const cpi) {
RATE_CONTROL *const rc = &cpi->rc;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
// Set minimum gf_interval for GF update to a multiple (== 2) of refresh
@@ -314,8 +314,8 @@
// background has high motion, refresh the golden frame. Otherwise, if the
// golden reference is to be updated check if we should NOT update the golden
// ref.
-void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_check_golden_update(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int mi_row, mi_col;
double fraction_low = 0.0;
@@ -356,7 +356,7 @@
// the resolution (resize_pending != 0).
if (cpi->resize_pending != 0 ||
(cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
- vp10_cyclic_refresh_set_golden_update(cpi);
+ av1_cyclic_refresh_set_golden_update(cpi);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
if (rc->frames_till_gf_update_due > rc->frames_to_key)
@@ -385,8 +385,8 @@
// 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock.
// Blocks labeled as BOOST1 may later get set to BOOST2 (during the
// encoding of the superblock).
-static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
unsigned char *const seg_map = cpi->segmentation_map;
int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
@@ -412,7 +412,7 @@
int mi_col = sb_col_index * MI_BLOCK_SIZE;
int qindex_thresh =
cpi->oxcf.content == VPX_CONTENT_SCREEN
- ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
+ ? av1_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
: 0;
assert(mi_row >= 0 && mi_row < cm->mi_rows);
assert(mi_col >= 0 && mi_col < cm->mi_cols);
@@ -453,9 +453,9 @@
}
// Set cyclic refresh parameters.
-void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
cr->percent_refresh = 10;
cr->max_qdelta_perc = 50;
@@ -477,8 +477,8 @@
}
// Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
struct segmentation *const seg = &cm->seg;
@@ -489,7 +489,7 @@
// Set segmentation map to 0 and disable.
unsigned char *const seg_map = cpi->segmentation_map;
memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
- vp10_disable_segmentation(&cm->seg);
+ av1_disable_segmentation(&cm->seg);
if (cm->frame_type == KEY_FRAME) {
memset(cr->last_coded_q_map, MAXQ,
cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map));
@@ -499,37 +499,37 @@
} else {
int qindex_delta = 0;
int qindex2;
- const double q = vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
+ const double q = av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
aom_clear_system_state();
// Set rate threshold to some multiple (set to 2 for now) of the target
// rate (target is given by sb64_target_rate and scaled by 256).
cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2;
// Distortion threshold, quadratic in Q, scale factor to be adjusted.
// q will not exceed 457, so (q * q) is within 32bit; see:
- // vp10_convert_qindex_to_q(), vp10_ac_quant(), ac_qlookup*[].
+ // av1_convert_qindex_to_q(), av1_ac_quant(), ac_qlookup*[].
cr->thresh_dist_sb = ((int64_t)(q * q)) << 2;
// Set up segmentation.
// Clear down the segment map.
- vp10_enable_segmentation(&cm->seg);
- vp10_clearall_segfeatures(seg);
+ av1_enable_segmentation(&cm->seg);
+ av1_clearall_segfeatures(seg);
// Select delta coding method.
seg->abs_delta = SEGMENT_DELTADATA;
// Note: setting temporal_update has no effect, as the seg-map coding method
// (temporal or spatial) is determined in
- // vp10_choose_segmap_coding_method(),
+ // av1_choose_segmap_coding_method(),
// based on the coding cost of each method. For error_resilient mode on the
// last_frame_seg_map is set to 0, so if temporal coding is used, it is
// relative to 0 previous map.
// seg->temporal_update = 0;
// Segment BASE "Q" feature is disabled so it defaults to the baseline Q.
- vp10_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
+ av1_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
// Use segment BOOST1 for in-frame Q adjustment.
- vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
+ av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
// Use segment BOOST2 for more aggressive in-frame Q adjustment.
- vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
+ av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
// Set the q delta for segment BOOST1.
qindex_delta = compute_deltaq(cpi, cm->base_qindex, cr->rate_ratio_qdelta);
@@ -538,9 +538,9 @@
// Compute rd-mult for segment BOOST1.
qindex2 = clamp(cm->base_qindex + cm->y_dc_delta_q + qindex_delta, 0, MAXQ);
- cr->rdmult = vp10_compute_rd_mult(cpi, qindex2);
+ cr->rdmult = av1_compute_rd_mult(cpi, qindex2);
- vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
+ av1_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
// Set a more aggressive (higher) q delta for segment BOOST2.
qindex_delta = compute_deltaq(
@@ -548,19 +548,19 @@
VPXMIN(CR_MAX_RATE_TARGET_RATIO,
0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta));
cr->qindex_delta[2] = qindex_delta;
- vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
+ av1_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
// Update the segmentation and refresh map.
cyclic_refresh_update_map(cpi);
}
}
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
return cr->rdmult;
}
-void vp10_cyclic_refresh_reset_resize(VP10_COMP *const cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
memset(cr->map, 0, cm->mi_rows * cm->mi_cols);
cr->sb_index = 0;
diff --git a/av1/encoder/aq_cyclicrefresh.h b/av1/encoder/aq_cyclicrefresh.h
index dcdc039..327fe14 100644
--- a/av1/encoder/aq_cyclicrefresh.h
+++ b/av1/encoder/aq_cyclicrefresh.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
-#define VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#ifndef AV1_ENCODER_AQ_CYCLICREFRESH_H_
+#define AV1_ENCODER_AQ_CYCLICREFRESH_H_
#include "av1/common/blockd.h"
@@ -27,55 +27,55 @@
// Maximum rate target ratio for setting segment delta-qp.
#define CR_MAX_RATE_TARGET_RATIO 4.0
-struct VP10_COMP;
+struct AV1_COMP;
struct CYCLIC_REFRESH;
typedef struct CYCLIC_REFRESH CYCLIC_REFRESH;
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols);
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols);
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr);
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr);
// Estimate the bits, incorporating the delta-q from segment 1, after encoding
// the frame.
-int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
+int av1_cyclic_refresh_estimate_bits_at_q(const struct AV1_COMP *cpi,
double correction_factor);
// Estimate the bits per mb, for a given q = i and a corresponding delta-q
// (for segment 1), prior to encoding the frame.
-int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
+int av1_cyclic_refresh_rc_bits_per_mb(const struct AV1_COMP *cpi, int i,
double correction_factor);
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
MB_MODE_INFO *const mbmi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip);
// Update the segmentation map, and related quantities: cyclic refresh map,
// refresh sb_index, and target number of blocks to be refreshed.
-void vp10_cyclic_refresh_update_map(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update_map(struct AV1_COMP *const cpi);
// Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_postencode(struct AV1_COMP *const cpi);
// Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_set_golden_update(struct AV1_COMP *const cpi);
// Check if we should not update golden reference, based on past refresh stats.
-void vp10_cyclic_refresh_check_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_check_golden_update(struct AV1_COMP *const cpi);
// Set/update global/frame level refresh parameters.
-void vp10_cyclic_refresh_update_parameters(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update_parameters(struct AV1_COMP *const cpi);
// Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_setup(struct AV1_COMP *const cpi);
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
-void vp10_cyclic_refresh_reset_resize(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_reset_resize(struct AV1_COMP *const cpi);
static INLINE int cyclic_refresh_segment_id_boosted(int segment_id) {
return segment_id == CR_SEGMENT_ID_BOOST1 ||
@@ -95,4 +95,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#endif // AV1_ENCODER_AQ_CYCLICREFRESH_H_
diff --git a/av1/encoder/aq_variance.c b/av1/encoder/aq_variance.c
index 37e53f6..b2eb17a 100644
--- a/av1/encoder/aq_variance.c
+++ b/av1/encoder/aq_variance.c
@@ -33,26 +33,26 @@
#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
-DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = { 0 };
-#if CONFIG_VPX_HIGHBITDEPTH
-DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = { 0 };
+DECLARE_ALIGNED(16, static const uint8_t, av1_64_zeros[64]) = { 0 };
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, static const uint16_t, av1_highbd_64_zeros[64]) = { 0 };
#endif
-unsigned int vp10_vaq_segment_id(int energy) {
+unsigned int av1_vaq_segment_id(int energy) {
ENERGY_IN_BOUNDS(energy);
return SEGMENT_ID(energy);
}
-void vp10_vaq_frame_setup(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+void av1_vaq_frame_setup(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
struct segmentation *seg = &cm->seg;
int i;
if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
- vp10_enable_segmentation(seg);
- vp10_clearall_segfeatures(seg);
+ av1_enable_segmentation(seg);
+ av1_clearall_segfeatures(seg);
seg->abs_delta = SEGMENT_DELTADATA;
@@ -60,7 +60,7 @@
for (i = 0; i < MAX_SEGMENTS; ++i) {
int qindex_delta =
- vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
+ av1_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
rate_ratio[i], cm->bit_depth);
// We don't allow qindex 0 in a segment if the base value is not 0.
@@ -76,8 +76,8 @@
continue;
}
- vp10_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
- vp10_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
+ av1_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
+ av1_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
}
}
}
@@ -105,7 +105,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w, int h,
uint64_t *sse, uint64_t *sum) {
@@ -136,9 +136,9 @@
*sse = (unsigned int)sse_long;
*sum = (int)sum_long;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
+static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
@@ -151,54 +151,54 @@
const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
int avg;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
- CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, bw, bh,
+ CONVERT_TO_BYTEPTR(av1_highbd_64_zeros), 0, bw, bh,
&sse, &avg);
sse >>= 2 * (xd->bd - 8);
avg >>= (xd->bd - 8);
} else {
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_64_zeros, 0,
bw, bh, &sse, &avg);
}
#else
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_64_zeros, 0,
bw, bh, &sse, &avg);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
var = sse - (((int64_t)avg * avg) / (bw * bh));
return (256 * var) / (bw * bh);
} else {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
var =
cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, &sse);
+ CONVERT_TO_BYTEPTR(av1_highbd_64_zeros), 0, &sse);
} else {
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- vp10_64_zeros, 0, &sse);
+ av1_64_zeros, 0, &sse);
}
#else
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- vp10_64_zeros, 0, &sse);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ av1_64_zeros, 0, &sse);
+#endif // CONFIG_AOM_HIGHBITDEPTH
return (256 * var) >> num_pels_log2_lookup[bs];
}
}
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
unsigned int var = block_variance(cpi, x, bs);
aom_clear_system_state();
return log(var + 1.0);
}
#define DEFAULT_E_MIDPOINT 10.0
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
double energy;
double energy_midpoint;
aom_clear_system_state();
energy_midpoint =
(cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
- energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
+ energy = av1_log_block_var(cpi, x, bs) - energy_midpoint;
return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
}
diff --git a/av1/encoder/aq_variance.h b/av1/encoder/aq_variance.h
index fa1103c..4900aa7 100644
--- a/av1/encoder/aq_variance.h
+++ b/av1/encoder/aq_variance.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_AQ_VARIANCE_H_
-#define VP10_ENCODER_AQ_VARIANCE_H_
+#ifndef AV1_ENCODER_AQ_VARIANCE_H_
+#define AV1_ENCODER_AQ_VARIANCE_H_
#include "av1/encoder/encoder.h"
@@ -18,14 +18,14 @@
extern "C" {
#endif
-unsigned int vp10_vaq_segment_id(int energy);
-void vp10_vaq_frame_setup(VP10_COMP *cpi);
+unsigned int av1_vaq_segment_id(int energy);
+void av1_vaq_frame_setup(AV1_COMP *cpi);
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_AQ_VARIANCE_H_
+#endif // AV1_ENCODER_AQ_VARIANCE_H_
diff --git a/av1/encoder/arm/neon/dct_neon.c b/av1/encoder/arm/neon/dct_neon.c
index 947b41a..bfcb0b4 100644
--- a/av1/encoder/arm/neon/dct_neon.c
+++ b/av1/encoder/arm/neon/dct_neon.c
@@ -18,7 +18,7 @@
#include "av1/common/blockd.h"
#include "aom_dsp/txfm_common.h"
-void vp10_fdct8x8_quant_neon(
+void av1_fdct8x8_quant_neon(
const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
@@ -28,7 +28,7 @@
(void)coeff_ptr;
aom_fdct8x8_neon(input, temp_buffer, stride);
- vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+ av1_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
}
diff --git a/av1/encoder/arm/neon/error_neon.c b/av1/encoder/arm/neon/error_neon.c
index b0761cd..0c89f2e 100644
--- a/av1/encoder/arm/neon/error_neon.c
+++ b/av1/encoder/arm/neon/error_neon.c
@@ -14,7 +14,7 @@
#include "./av1_rtcd.h"
-int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
+int64_t av1_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
int block_size) {
int64x2_t error = vdupq_n_s64(0);
diff --git a/av1/encoder/arm/neon/quantize_neon.c b/av1/encoder/arm/neon/quantize_neon.c
index ded962d..6abf639 100644
--- a/av1/encoder/arm/neon/quantize_neon.c
+++ b/av1/encoder/arm/neon/quantize_neon.c
@@ -22,7 +22,7 @@
#include "av1/encoder/quantize.h"
#include "av1/encoder/rd.h"
-void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
+void av1_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 3f1ccc6..2c90bec 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -42,34 +42,34 @@
#include "av1/encoder/subexp.h"
#include "av1/encoder/tokenize.h"
-static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
+static const struct av1_token intra_mode_encodings[INTRA_MODES] = {
{ 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 },
{ 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
};
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
{ { 0, 1 }, { 2, 2 }, { 3, 2 } };
-static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+static const struct av1_token partition_encodings[PARTITION_TYPES] = {
{ 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
};
-static const struct vp10_token inter_mode_encodings[INTER_MODES] = {
+static const struct av1_token inter_mode_encodings[INTER_MODES] = {
{ 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 }
};
-static struct vp10_token ext_tx_encodings[TX_TYPES];
+static struct av1_token ext_tx_encodings[TX_TYPES];
-void vp10_encode_token_init() {
- vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree);
+void av1_encode_token_init() {
+ av1_tokens_from_tree(ext_tx_encodings, av1_ext_tx_tree);
}
static void write_intra_mode(aom_writer *w, PREDICTION_MODE mode,
const aom_prob *probs) {
- vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+ av1_write_token(w, av1_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
static void write_inter_mode(aom_writer *w, PREDICTION_MODE mode,
const aom_prob *probs) {
assert(is_inter_mode(mode));
- vp10_write_token(w, vp10_inter_mode_tree, probs,
+ av1_write_token(w, av1_inter_mode_tree, probs,
&inter_mode_encodings[INTER_OFFSET(mode)]);
}
@@ -88,9 +88,9 @@
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i)
- vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
+ av1_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
static int prob_diff_update_savings(const aom_tree_index *tree,
@@ -103,14 +103,14 @@
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i) {
- savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
+ savings += av1_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
}
return savings;
}
-static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
aom_writer *w) {
TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
@@ -125,37 +125,37 @@
}
}
-static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
int segment_id, const MODE_INFO *mi, aom_writer *w) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int skip = mi->mbmi.skip;
- aom_write(w, skip, vp10_get_skip_prob(cm, xd));
+ aom_write(w, skip, av1_get_skip_prob(cm, xd));
return skip;
}
}
-static void update_skip_probs(VP10_COMMON *cm, aom_writer *w,
+static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int k;
for (k = 0; k < SKIP_CONTEXTS; ++k)
- vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
+ av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}
-static void update_switchable_interp_probs(VP10_COMMON *cm, aom_writer *w,
+static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
- prob_diff_update(vp10_switchable_interp_tree,
+ prob_diff_update(av1_switchable_interp_tree,
cm->fc->switchable_interp_prob[j],
counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
-static void update_ext_tx_probs(VP10_COMMON *cm, aom_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int savings = 0;
@@ -163,7 +163,7 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
savings += prob_diff_update_savings(
- vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
cm->counts.intra_ext_tx[i][j], TX_TYPES);
}
do_update = savings > savings_thresh;
@@ -171,21 +171,21 @@
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ prob_diff_update(av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
}
}
savings = 0;
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
savings +=
- prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ prob_diff_update_savings(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
cm->counts.inter_ext_tx[i], TX_TYPES);
}
do_update = savings > savings_thresh;
aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ prob_diff_update(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
cm->counts.inter_ext_tx[i], TX_TYPES, w);
}
}
@@ -201,22 +201,22 @@
while (p < stop && p->token != EOSB_TOKEN) {
const int t = p->token;
- const struct vp10_token *const a = &vp10_coef_encodings[t];
+ const struct av1_token *const a = &av1_coef_encodings[t];
int i = 0;
int v = a->value;
int n = a->len;
-#if CONFIG_VPX_HIGHBITDEPTH
- const vp10_extra_bit *b;
+#if CONFIG_AOM_HIGHBITDEPTH
+ const av1_extra_bit *b;
if (bit_depth == VPX_BITS_12)
- b = &vp10_extra_bits_high12[t];
+ b = &av1_extra_bits_high12[t];
else if (bit_depth == VPX_BITS_10)
- b = &vp10_extra_bits_high10[t];
+ b = &av1_extra_bits_high10[t];
else
- b = &vp10_extra_bits[t];
+ b = &av1_extra_bits[t];
#else
- const vp10_extra_bit *const b = &vp10_extra_bits[t];
+ const av1_extra_bit *const b = &av1_extra_bits[t];
(void)bit_depth;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* skip one or two nodes */
if (p->skip_eob_node) {
@@ -235,12 +235,12 @@
if (t >= TWO_TOKEN && t < EOB_TOKEN) {
int len = UNCONSTRAINED_NODES - p->skip_eob_node;
int bits = v >> (n - len);
- vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i);
- vp10_write_tree(w, vp10_coef_con_tree,
- vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+ av1_write_tree(w, av1_coef_tree, p->context_tree, bits, len, i);
+ av1_write_tree(w, av1_coef_con_tree,
+ av1_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
n - len, 0);
} else {
- vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i);
+ av1_write_tree(w, av1_coef_tree, p->context_tree, v, n, i);
}
if (b->base_val) {
@@ -281,11 +281,11 @@
const struct segmentation_probs *segp,
int segment_id) {
if (seg->enabled && seg->update_map)
- vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
+ av1_write_tree(w, av1_segment_tree, segp->tree_probs, segment_id, 3, 0);
}
// This function encodes the reference frame
-static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int is_compound = has_second_ref(mbmi);
@@ -301,28 +301,28 @@
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- aom_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
+ aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
} else {
assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
}
if (is_compound) {
aom_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
- vp10_get_pred_prob_comp_ref_p(cm, xd));
+ av1_get_pred_prob_comp_ref_p(cm, xd));
} else {
const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
- aom_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+ aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
if (bit0) {
const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
- aom_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
}
}
}
}
-static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
+static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
aom_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const nmv_context *nmvc = &cm->fc->nmvc;
const MACROBLOCK *const x = &cpi->td.mb;
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -345,7 +345,7 @@
if (seg->update_map) {
if (seg->temporal_update) {
const int pred_flag = mbmi->seg_id_predicted;
- aom_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
+ aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
aom_write(w, pred_flag, pred_prob);
if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
} else {
@@ -356,7 +356,7 @@
skip = write_skip(cm, xd, segment_id, mi, w);
if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- aom_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));
+ aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
!(is_inter && skip) && !xd->lossless[segment_id]) {
@@ -391,8 +391,8 @@
}
if (cm->interp_filter == SWITCHABLE) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
- vp10_write_token(w, vp10_switchable_interp_tree,
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
+ av1_write_token(w, av1_switchable_interp_tree,
cm->fc->switchable_interp_prob[ctx],
&switchable_interp_encodings[mbmi->interp_filter]);
++cpi->interp_filter_selected[0][mbmi->interp_filter];
@@ -411,7 +411,7 @@
write_inter_mode(w, b_mode, inter_probs);
if (b_mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
nmvc, allow_hp);
}
@@ -420,7 +420,7 @@
} else {
if (mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
- vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
nmvc, allow_hp);
}
@@ -429,12 +429,12 @@
if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
if (is_inter) {
- vp10_write_token(w, vp10_ext_tx_tree,
+ av1_write_token(w, av1_ext_tx_tree,
cm->fc->inter_ext_tx_prob[mbmi->tx_size],
&ext_tx_encodings[mbmi->tx_type]);
} else {
- vp10_write_token(
- w, vp10_ext_tx_tree,
+ av1_write_token(
+ w, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size]
[intra_mode_to_tx_type_context[mbmi->mode]],
&ext_tx_encodings[mbmi->tx_type]);
@@ -444,7 +444,7 @@
}
}
-static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO **mi_8x8, aom_writer *w) {
const struct segmentation *const seg = &cm->seg;
#if CONFIG_MISC_FIXES
@@ -487,19 +487,19 @@
if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- vp10_write_token(
- w, vp10_ext_tx_tree,
+ av1_write_token(
+ w, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size]
[intra_mode_to_tx_type_context[mbmi->mode]],
&ext_tx_encodings[mbmi->tx_type]);
}
}
-static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
+static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
aom_writer *w, TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end, int mi_row,
int mi_col) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
int plane;
@@ -530,7 +530,7 @@
}
}
-static void write_partition(const VP10_COMMON *const cm,
+static void write_partition(const AV1_COMMON *const cm,
const MACROBLOCKD *const xd, int hbs, int mi_row,
int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
aom_writer *w) {
@@ -540,7 +540,7 @@
const int has_cols = (mi_col + hbs) < cm->mi_cols;
if (has_rows && has_cols) {
- vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+ av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
} else if (!has_rows && has_cols) {
assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
aom_write(w, p == PARTITION_SPLIT, probs[1]);
@@ -552,11 +552,11 @@
}
}
-static void write_modes_sb(VP10_COMP *cpi, const TileInfo *const tile,
+static void write_modes_sb(AV1_COMP *cpi, const TileInfo *const tile,
aom_writer *w, TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end, int mi_row,
int mi_col, BLOCK_SIZE bsize) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
const int bsl = b_width_log2_lookup[bsize];
@@ -617,7 +617,7 @@
#endif
}
-static void write_modes(VP10_COMP *cpi, const TileInfo *const tile,
+static void write_modes(AV1_COMP *cpi, const TileInfo *const tile,
aom_writer *w, TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end) {
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
@@ -625,17 +625,17 @@
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += MI_BLOCK_SIZE) {
- vp10_zero(xd->left_seg_context);
+ av1_zero(xd->left_seg_context);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE)
write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
}
}
-static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
- vp10_coeff_stats *coef_branch_ct,
- vp10_coeff_probs_model *coef_probs) {
- vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
+static void build_tree_distribution(AV1_COMP *cpi, TX_SIZE tx_size,
+ av1_coeff_stats *coef_branch_ct,
+ av1_coeff_probs_model *coef_probs) {
+ av1_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
cpi->common.counts.eob_branch[tx_size];
int i, j, k, l, m;
@@ -644,7 +644,7 @@
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
- vp10_tree_probs_from_distribution(vp10_coef_tree,
+ av1_tree_probs_from_distribution(av1_coef_tree,
coef_branch_ct[i][j][k][l],
coef_counts[i][j][k][l]);
coef_branch_ct[i][j][k][l][0][1] =
@@ -659,11 +659,11 @@
}
}
-static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
TX_SIZE tx_size,
- vp10_coeff_stats *frame_branch_ct,
- vp10_coeff_probs_model *new_coef_probs) {
- vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+ av1_coeff_stats *frame_branch_ct,
+ av1_coeff_probs_model *new_coef_probs) {
+ av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
const aom_prob upd = DIFF_UPDATE_PROB;
const int entropy_nodes_update = UNCONSTRAINED_NODES;
int i, j, k, l, t;
@@ -684,17 +684,17 @@
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
if (s > 0 && newp != oldp) u = 1;
if (u)
- savings += s - (int)(vp10_cost_zero(upd));
+ savings += s - (int)(av1_cost_zero(upd));
else
- savings -= (int)(vp10_cost_zero(upd));
+ savings -= (int)(av1_cost_zero(upd));
update[u]++;
}
}
@@ -721,17 +721,17 @@
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
if (s > 0 && newp != *oldp) u = 1;
aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -757,11 +757,11 @@
int u = 0;
if (t == PIVOT_NODE) {
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
} else {
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
}
@@ -781,7 +781,7 @@
aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -798,13 +798,13 @@
}
}
-static void update_coef_probs(VP10_COMP *cpi, aom_writer *w) {
+static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
const TX_MODE tx_mode = cpi->common.tx_mode;
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
- vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
- vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+ av1_coeff_stats frame_branch_ct[PLANE_TYPES];
+ av1_coeff_probs_model frame_coef_probs[PLANE_TYPES];
if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
(tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
aom_write_bit(w, 0);
@@ -855,7 +855,7 @@
}
#if CONFIG_CLPF
-static void encode_clpf(const VP10_COMMON *cm,
+static void encode_clpf(const AV1_COMMON *cm,
struct aom_write_bit_buffer *wb) {
aom_wb_write_literal(wb, cm->clpf, 1);
}
@@ -876,7 +876,7 @@
}
}
-static void encode_quantization(const VP10_COMMON *const cm,
+static void encode_quantization(const AV1_COMMON *const cm,
struct aom_write_bit_buffer *wb) {
aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
write_delta_q(wb, cm->y_dc_delta_q);
@@ -891,7 +891,7 @@
#endif
}
-static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
struct aom_write_bit_buffer *wb) {
int i, j;
@@ -911,7 +911,7 @@
}
if (seg->update_map) {
// Select the coding strategy (temporal or spatial)
- vp10_choose_segmap_coding_method(cm, xd);
+ av1_choose_segmap_coding_method(cm, xd);
#if !CONFIG_MISC_FIXES
// Write out probabilities used to decode unpredicted macro-block segments
for (i = 0; i < SEG_TREE_PROBS; i++) {
@@ -952,9 +952,9 @@
aom_wb_write_bit(wb, active);
if (active) {
const int data = get_segdata(seg, i, j);
- const int data_max = vp10_seg_feature_data_max(j);
+ const int data_max = av1_seg_feature_data_max(j);
- if (vp10_is_segfeature_signed(j)) {
+ if (av1_is_segfeature_signed(j)) {
encode_unsigned_max(wb, abs(data), data_max);
aom_wb_write_bit(wb, data < 0);
} else {
@@ -967,8 +967,8 @@
}
#if CONFIG_MISC_FIXES
-static void update_seg_probs(VP10_COMP *cpi, aom_writer *w) {
- VP10_COMMON *cm = &cpi->common;
+static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
+ AV1_COMMON *cm = &cpi->common;
if (!cpi->common.seg.enabled) return;
@@ -976,13 +976,13 @@
int i;
for (i = 0; i < PREDICTION_PROBS; i++)
- vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
+ av1_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
cm->counts.seg.pred[i]);
- prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+ prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
} else {
- prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+ prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
cm->counts.seg.tree_total, MAX_SEGMENTS, w);
}
}
@@ -998,7 +998,7 @@
}
#endif
-static void update_txfm_probs(VP10_COMMON *cm, aom_writer *w,
+static void update_txfm_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
if (cm->tx_mode == TX_MODE_SELECT) {
int i, j;
@@ -1007,22 +1007,22 @@
unsigned int ct_32x32p[TX_SIZES - 1][2];
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
+ av1_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
for (j = 0; j < TX_SIZES - 3; j++)
- vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
+ av1_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
+ av1_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
for (j = 0; j < TX_SIZES - 2; j++)
- vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
+ av1_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
ct_16x16p[j]);
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
+ av1_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
for (j = 0; j < TX_SIZES - 1; j++)
- vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
+ av1_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
ct_32x32p[j]);
}
}
@@ -1034,7 +1034,7 @@
if (filter != SWITCHABLE) aom_wb_write_literal(wb, filter, 2);
}
-static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
if (cm->interp_filter == SWITCHABLE) {
// Check to see if only one of the filters is actually used
int count[SWITCHABLE_FILTERS];
@@ -1057,10 +1057,10 @@
}
}
-static void write_tile_info(const VP10_COMMON *const cm,
+static void write_tile_info(const AV1_COMMON *const cm,
struct aom_write_bit_buffer *wb) {
int min_log2_tile_cols, max_log2_tile_cols, ones;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
// columns
ones = cm->log2_tile_cols - min_log2_tile_cols;
@@ -1073,13 +1073,13 @@
if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
-static int get_refresh_mask(VP10_COMP *cpi) {
- if (vp10_preserve_existing_gf(cpi)) {
+static int get_refresh_mask(AV1_COMP *cpi) {
+ if (av1_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term we leave it in the GF slot and,
// if we're updating the GF with the current decoded frame, we save it
// instead to the ARF slot.
- // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
+ // Later, in the function av1_encoder.c:av1_update_reference_frames() we
// will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
// there so that it can be done outside of the recode loop.
// Note: This is highly specific to the use of ARF as a forward reference,
@@ -1099,9 +1099,9 @@
}
}
-static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
+static size_t encode_tiles(AV1_COMP *cpi, uint8_t *data_ptr,
unsigned int *max_tile_sz) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
aom_writer residual_bc;
int tile_row, tile_col;
TOKENEXTRA *tok_end;
@@ -1149,7 +1149,7 @@
return total_size;
}
-static void write_render_size(const VP10_COMMON *cm,
+static void write_render_size(const AV1_COMMON *cm,
struct aom_write_bit_buffer *wb) {
const int scaling_active =
cm->width != cm->render_width || cm->height != cm->render_height;
@@ -1160,7 +1160,7 @@
}
}
-static void write_frame_size(const VP10_COMMON *cm,
+static void write_frame_size(const AV1_COMMON *cm,
struct aom_write_bit_buffer *wb) {
aom_wb_write_literal(wb, cm->width - 1, 16);
aom_wb_write_literal(wb, cm->height - 1, 16);
@@ -1168,9 +1168,9 @@
write_render_size(cm, wb);
}
-static void write_frame_size_with_refs(VP10_COMP *cpi,
+static void write_frame_size_with_refs(AV1_COMP *cpi,
struct aom_write_bit_buffer *wb) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int found = 0;
MV_REFERENCE_FRAME ref_frame;
@@ -1206,9 +1206,9 @@
}
static void write_sync_code(struct aom_write_bit_buffer *wb) {
- aom_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
- aom_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
- aom_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_0, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_1, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_2, 8);
}
static void write_profile(BITSTREAM_PROFILE profile,
@@ -1223,7 +1223,7 @@
}
static void write_bitdepth_colorspace_sampling(
- VP10_COMMON *const cm, struct aom_write_bit_buffer *wb) {
+ AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
if (cm->profile >= PROFILE_2) {
assert(cm->bit_depth > VPX_BITS_8);
aom_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
@@ -1246,9 +1246,9 @@
}
}
-static void write_uncompressed_header(VP10_COMP *cpi,
+static void write_uncompressed_header(AV1_COMP *cpi,
struct aom_write_bit_buffer *wb) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
aom_wb_write_literal(wb, VPX_FRAME_MARKER, 2);
@@ -1358,8 +1358,8 @@
write_tile_info(cm, wb);
}
-static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
- VP10_COMMON *const cm = &cpi->common;
+static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
+ AV1_COMMON *const cm = &cpi->common;
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = cpi->td.counts;
aom_writer header_bc;
@@ -1386,32 +1386,32 @@
update_seg_probs(cpi, &header_bc);
for (i = 0; i < INTRA_MODES; ++i)
- prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
+ prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
counts->uv_mode[i], INTRA_MODES, &header_bc);
for (i = 0; i < PARTITION_CONTEXTS; ++i)
- prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+ prob_diff_update(av1_partition_tree, fc->partition_prob[i],
counts->partition[i], PARTITION_TYPES, &header_bc);
#endif
if (frame_is_intra_only(cm)) {
- vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+ av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
#if CONFIG_MISC_FIXES
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j)
- prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
+ prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
counts->kf_y_mode[i][j], INTRA_MODES, &header_bc);
#endif
} else {
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
+ prob_diff_update(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, &header_bc);
if (cm->interp_filter == SWITCHABLE)
update_switchable_interp_probs(cm, &header_bc, counts);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
+ av1_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
counts->intra_inter[i]);
if (cpi->allow_comp_inter_inter) {
@@ -1424,42 +1424,42 @@
aom_write_bit(&header_bc, use_hybrid_pred);
if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
+ av1_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
counts->comp_inter[i]);
}
#else
if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
+ av1_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
counts->comp_inter[i]);
#endif
}
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < REF_CONTEXTS; i++) {
- vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
+ av1_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
counts->single_ref[i][0]);
- vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
+ av1_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
counts->single_ref[i][1]);
}
}
if (cm->reference_mode != SINGLE_REFERENCE)
for (i = 0; i < REF_CONTEXTS; i++)
- vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
+ av1_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
counts->comp_ref[i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
- prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
+ prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
counts->y_mode[i], INTRA_MODES, &header_bc);
#if !CONFIG_MISC_FIXES
for (i = 0; i < PARTITION_CONTEXTS; ++i)
- prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+ prob_diff_update(av1_partition_tree, fc->partition_prob[i],
counts->partition[i], PARTITION_TYPES, &header_bc);
#endif
- vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
+ av1_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
&counts->mv);
update_ext_tx_probs(cm, &header_bc);
}
@@ -1505,14 +1505,14 @@
}
#endif
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dest, size_t *size) {
uint8_t *data = dest;
size_t first_part_size, uncompressed_hdr_size, data_sz;
struct aom_write_bit_buffer wb = { data, 0 };
struct aom_write_bit_buffer saved_wb;
unsigned int max_tile;
#if CONFIG_MISC_FIXES
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
const int have_tiles = n_log2_tiles > 0;
#else
diff --git a/av1/encoder/bitstream.h b/av1/encoder/bitstream.h
index e8e4581..a9bb97a 100644
--- a/av1/encoder/bitstream.h
+++ b/av1/encoder/bitstream.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_BITSTREAM_H_
-#define VP10_ENCODER_BITSTREAM_H_
+#ifndef AV1_ENCODER_BITSTREAM_H_
+#define AV1_ENCODER_BITSTREAM_H_
#ifdef __cplusplus
extern "C" {
@@ -18,10 +18,10 @@
#include "av1/encoder/encoder.h"
-void vp10_encode_token_init();
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size);
+void av1_encode_token_init();
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dest, size_t *size);
-static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
+static INLINE int av1_preserve_existing_gf(AV1_COMP *cpi) {
return !cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
cpi->rc.is_src_frame_alt_ref;
}
@@ -30,4 +30,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_BITSTREAM_H_
+#endif // AV1_ENCODER_BITSTREAM_H_
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index 1346ba6..45d853c 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_BLOCK_H_
-#define VP10_ENCODER_BLOCK_H_
+#ifndef AV1_ENCODER_BLOCK_H_
+#define AV1_ENCODER_BLOCK_H_
#include "av1/common/entropymv.h"
#include "av1/common/entropy.h"
@@ -45,7 +45,7 @@
/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
* coefficient in this block was zero) or not. */
-typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
+typedef unsigned int av1_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
[COEFF_CONTEXTS][ENTROPY_TOKENS];
typedef struct {
@@ -117,7 +117,7 @@
int encode_breakout;
// note that token_costs is the cost when eob node is skipped
- vp10_coeff_cost token_costs[TX_SIZES];
+ av1_coeff_cost token_costs[TX_SIZES];
int optimize;
@@ -147,4 +147,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_BLOCK_H_
+#endif // AV1_ENCODER_BLOCK_H_
diff --git a/av1/encoder/blockiness.c b/av1/encoder/blockiness.c
index 36d4410..49c5dad 100644
--- a/av1/encoder/blockiness.c
+++ b/av1/encoder/blockiness.c
@@ -120,7 +120,7 @@
// This function returns the blockiness for the entire frame currently by
// looking at all borders in steps of 4.
-double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
+double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
const unsigned char *img2, int img2_pitch, int width,
int height) {
double blockiness = 0;
diff --git a/av1/encoder/context_tree.c b/av1/encoder/context_tree.c
index d89022e..1a5a663 100644
--- a/av1/encoder/context_tree.c
+++ b/av1/encoder/context_tree.c
@@ -16,7 +16,7 @@
BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
};
-static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
+static void alloc_mode_context(AV1_COMMON *cm, int num_4x4_blk,
PICK_MODE_CONTEXT *ctx) {
const int num_blk = (num_4x4_blk < 4 ? 4 : num_4x4_blk);
const int num_pix = num_blk << 4;
@@ -65,7 +65,7 @@
}
}
-static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
+static void alloc_tree_contexts(AV1_COMMON *cm, PC_TREE *tree,
int num_4x4_blk) {
alloc_mode_context(cm, num_4x4_blk, &tree->none);
alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]);
@@ -92,7 +92,7 @@
// partition level. There are contexts for none, horizontal, vertical, and
// split. Along with a block_size value and a selected block_size which
// represents the state of our search.
-void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
+void av1_setup_pc_tree(AV1_COMMON *cm, ThreadData *td) {
int i, j;
const int leaf_nodes = 64;
const int tree_nodes = 64 + 16 + 4 + 1;
@@ -141,7 +141,7 @@
td->pc_root[0].none.best_mode_index = 2;
}
-void vp10_free_pc_tree(ThreadData *td) {
+void av1_free_pc_tree(ThreadData *td) {
const int tree_nodes = 64 + 16 + 4 + 1;
int i;
diff --git a/av1/encoder/context_tree.h b/av1/encoder/context_tree.h
index c22b866..391ff90 100644
--- a/av1/encoder/context_tree.h
+++ b/av1/encoder/context_tree.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_CONTEXT_TREE_H_
-#define VP10_ENCODER_CONTEXT_TREE_H_
+#ifndef AV1_ENCODER_CONTEXT_TREE_H_
+#define AV1_ENCODER_CONTEXT_TREE_H_
#include "av1/common/blockd.h"
#include "av1/encoder/block.h"
@@ -19,8 +19,8 @@
extern "C" {
#endif
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
struct ThreadData;
// Structure to hold snapshot of coding context during the mode picking process
@@ -78,11 +78,11 @@
};
} PC_TREE;
-void vp10_setup_pc_tree(struct VP10Common *cm, struct ThreadData *td);
-void vp10_free_pc_tree(struct ThreadData *td);
+void av1_setup_pc_tree(struct AV1Common *cm, struct ThreadData *td);
+void av1_free_pc_tree(struct ThreadData *td);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif /* VP10_ENCODER_CONTEXT_TREE_H_ */
+#endif /* AV1_ENCODER_CONTEXT_TREE_H_ */
diff --git a/av1/encoder/cost.c b/av1/encoder/cost.c
index acf4a47..a84f7d3 100644
--- a/av1/encoder/cost.c
+++ b/av1/encoder/cost.c
@@ -15,7 +15,7 @@
/* round(-log2(i/256.) * (1 << VP9_PROB_COST_SHIFT))
Begins and ends with a bogus entry to satisfy use of prob=0 in the firstpass.
https://code.google.com/p/webm/issues/detail?id=1089 */
-const uint16_t vp10_prob_cost[257] = {
+const uint16_t av1_prob_cost[257] = {
4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718,
1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409,
@@ -44,7 +44,7 @@
int b;
for (b = 0; b <= 1; ++b) {
- const int cc = c + vp10_cost_bit(prob, b);
+ const int cc = c + av1_cost_bit(prob, b);
const aom_tree_index ii = tree[i + b];
if (ii <= 0)
@@ -54,13 +54,13 @@
}
}
-void vp10_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree) {
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree) {
cost(costs, tree, probs, 0, 0);
}
-void vp10_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree) {
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree) {
assert(tree[0] <= 0 && tree[1] > 0);
- costs[-tree[0]] = vp10_cost_bit(probs[0], 0);
+ costs[-tree[0]] = av1_cost_bit(probs[0], 0);
cost(costs, tree, probs, 2, 0);
}
diff --git a/av1/encoder/cost.h b/av1/encoder/cost.h
index be12464..374f7d9 100644
--- a/av1/encoder/cost.h
+++ b/av1/encoder/cost.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_COST_H_
-#define VP10_ENCODER_COST_H_
+#ifndef AV1_ENCODER_COST_H_
+#define AV1_ENCODER_COST_H_
#include "aom_dsp/prob.h"
#include "aom/aom_integer.h"
@@ -19,20 +19,20 @@
extern "C" {
#endif
-extern const uint16_t vp10_prob_cost[257];
+extern const uint16_t av1_prob_cost[257];
-// The factor to scale from cost in bits to cost in vp10_prob_cost units.
+// The factor to scale from cost in bits to cost in av1_prob_cost units.
#define VP9_PROB_COST_SHIFT 9
-#define vp10_cost_zero(prob) (vp10_prob_cost[prob])
+#define av1_cost_zero(prob) (av1_prob_cost[prob])
-#define vp10_cost_one(prob) vp10_cost_zero(256 - (prob))
+#define av1_cost_one(prob) av1_cost_zero(256 - (prob))
-#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) : (prob))
+#define av1_cost_bit(prob, bit) av1_cost_zero((bit) ? 256 - (prob) : (prob))
static INLINE unsigned int cost_branch256(const unsigned int ct[2],
aom_prob p) {
- return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
+ return ct[0] * av1_cost_zero(p) + ct[1] * av1_cost_one(p);
}
static INLINE int treed_cost(aom_tree tree, const aom_prob *probs, int bits,
@@ -42,18 +42,18 @@
do {
const int bit = (bits >> --len) & 1;
- cost += vp10_cost_bit(probs[i >> 1], bit);
+ cost += av1_cost_bit(probs[i >> 1], bit);
i = tree[i + bit];
} while (len);
return cost;
}
-void vp10_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree);
-void vp10_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree);
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree);
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_COST_H_
+#endif // AV1_ENCODER_COST_H_
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index 30456f8..803c962 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -293,7 +293,7 @@
out[15] = (tran_low_t)fdct_round_shift(temp2);
}
-/* TODO(angiebird): Unify this with vp10_fwd_txfm.c: vp10_fdct32
+/* TODO(angiebird): Unify this with av1_fwd_txfm.c: av1_fdct32
static void fdct32(const tran_low_t *input, tran_low_t *output) {
tran_high_t temp;
tran_low_t step[32];
@@ -988,7 +988,7 @@
{ fadst16, fadst16 } // ADST_ADST = 3
};
-void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
if (tx_type == DCT_DCT) {
aom_fdct4x4_c(input, output, stride);
@@ -1015,7 +1015,7 @@
}
}
-void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
+void av1_fdct8x8_quant_c(const int16_t *input, int stride,
tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -1141,7 +1141,7 @@
*eob_ptr = eob + 1;
}
-void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
if (tx_type == DCT_DCT) {
aom_fdct8x8_c(input, output, stride);
@@ -1170,7 +1170,7 @@
/* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
pixel. */
-void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
int i;
tran_high_t a1, b1, c1, d1, e1;
const int16_t *ip_pass0 = input;
@@ -1224,7 +1224,7 @@
}
}
-void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
if (tx_type == DCT_DCT) {
aom_fdct16x16_c(input, output, stride);
@@ -1251,24 +1251,24 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
- vp10_fht4x4_c(input, output, stride, tx_type);
+ av1_fht4x4_c(input, output, stride, tx_type);
}
-void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
- vp10_fht8x8_c(input, output, stride, tx_type);
+ av1_fht8x8_c(input, output, stride, tx_type);
}
-void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
int stride) {
- vp10_fwht4x4_c(input, output, stride);
+ av1_fwht4x4_c(input, output, stride);
}
-void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
int stride, int tx_type) {
- vp10_fht16x16_c(input, output, stride, tx_type);
+ av1_fht16x16_c(input, output, stride, tx_type);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 3495262..f5808ad 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -47,7 +47,7 @@
#include "av1/encoder/segmentation.h"
#include "av1/encoder/tokenize.h"
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
@@ -63,7 +63,7 @@
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
@@ -95,9 +95,9 @@
128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
128 * 16
};
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs) {
unsigned int sse;
@@ -106,8 +106,8 @@
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
-#if CONFIG_VPX_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs, int bd) {
unsigned int var, sse;
@@ -131,9 +131,9 @@
}
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
+static unsigned int get_sby_perpixel_diff_variance(AV1_COMP *cpi,
const struct buf_2d *ref,
int mi_row, int mi_col,
BLOCK_SIZE bs) {
@@ -148,7 +148,7 @@
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
-static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
+static BLOCK_SIZE get_rd_var_based_fixed_partition(AV1_COMP *cpi,
MACROBLOCK *x, int mi_row,
int mi_col) {
unsigned int var = get_sby_perpixel_diff_variance(
@@ -165,21 +165,21 @@
// Lighter version of set_offsets that only sets the mode info
// pointers.
-static INLINE void set_mode_info_offsets(VP10_COMP *const cpi,
+static INLINE void set_mode_info_offsets(AV1_COMP *const cpi,
MACROBLOCK *const x,
MACROBLOCKD *const xd, int mi_row,
int mi_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
xd->mi[0] = cm->mi + idx_str;
x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
-static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_offsets(AV1_COMP *cpi, const TileInfo *const tile,
MACROBLOCK *const x, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -193,7 +193,7 @@
mbmi = &xd->mi[0]->mbmi;
// Set up destination pointers.
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
// Set up limit values for MV components.
// Mv beyond the range do not produce new/different prediction block.
@@ -208,7 +208,7 @@
cm->mi_cols);
// Set up source buffers.
- vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+ av1_setup_src_planes(x, cpi->Source, mi_row, mi_col);
// R/D setup.
x->rddiv = cpi->rd.RDDIV;
@@ -221,7 +221,7 @@
seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
- vp10_init_plane_quantizers(cpi, x);
+ av1_init_plane_quantizers(cpi, x);
x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
} else {
@@ -229,11 +229,11 @@
x->encode_breakout = cpi->encode_breakout;
}
- // required by vp10_append_sub8x8_mvs_for_idx() and vp10_find_best_ref_mvs()
+ // required by av1_append_sub8x8_mvs_for_idx() and av1_find_best_ref_mvs()
xd->tile = *tile;
}
-static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+static void set_block_size(AV1_COMP *const cpi, MACROBLOCK *const x,
MACROBLOCKD *const xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
@@ -368,12 +368,12 @@
&node.part_variances->none);
}
-static int set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+static int set_vt_partitioning(AV1_COMP *cpi, MACROBLOCK *const x,
MACROBLOCKD *const xd, void *data,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int64_t threshold, BLOCK_SIZE bsize_min,
int force_split) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
variance_node vt;
const int block_width = num_8x8_blocks_wide_lookup[bsize];
const int block_height = num_8x8_blocks_high_lookup[bsize];
@@ -452,8 +452,8 @@
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
-static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_vbp_thresholds(AV1_COMP *cpi, int64_t thresholds[], int q) {
+ AV1_COMMON *const cm = &cpi->common;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
const int threshold_multiplier = is_key_frame ? 20 : 1;
const int64_t threshold_base =
@@ -478,8 +478,8 @@
}
}
-void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int q) {
+ AV1_COMMON *const cm = &cpi->common;
SPEED_FEATURES *const sf = &cpi->sf;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
if (sf->partition_search_type != VAR_BASED_PARTITION &&
@@ -507,7 +507,7 @@
// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
int dp, int x16_idx, int y16_idx,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int highbd_flag,
#endif
int pixels_wide, int pixels_high) {
@@ -521,7 +521,7 @@
int min = 0;
int max = 0;
if (x8_idx < pixels_wide && y8_idx < pixels_high) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
aom_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
d + y8_idx * dp + x8_idx, dp, &min, &max);
@@ -542,7 +542,7 @@
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
int dp, int x8_idx, int y8_idx, v8x8 *vst,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int highbd_flag,
#endif
int pixels_wide, int pixels_high,
@@ -556,7 +556,7 @@
if (x4_idx < pixels_wide && y4_idx < pixels_high) {
int s_avg;
int d_avg = 128;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
s_avg = aom_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
if (!is_key_frame)
@@ -578,7 +578,7 @@
static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
int dp, int x16_idx, int y16_idx, v16x16 *vst,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int highbd_flag,
#endif
int pixels_wide, int pixels_high,
@@ -592,7 +592,7 @@
if (x8_idx < pixels_wide && y8_idx < pixels_high) {
int s_avg;
int d_avg = 128;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
s_avg = aom_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
if (!is_key_frame)
@@ -614,9 +614,9 @@
// This function chooses partitioning based on the variance between source and
// reconstructed last, where variance is computed for down-sampled inputs.
-static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+static int choose_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
MACROBLOCK *x, int mi_row, int mi_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
int i, j, k, m;
v64x64 vt;
@@ -643,7 +643,7 @@
segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
if (cyclic_refresh_segment_id_boosted(segment_id)) {
- int q = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ int q = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
set_vbp_thresholds(cpi, thresholds, q);
}
}
@@ -670,7 +670,7 @@
yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
if (yv12_g && yv12_g != yv12) {
- vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+ av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
&cm->frame_refs[GOLDEN_FRAME - 1].sf);
y_sad_g = cpi->fn_ptr[bsize].sdf(
x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
@@ -679,7 +679,7 @@
y_sad_g = UINT_MAX;
}
- vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
+ av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
&cm->frame_refs[LAST_FRAME - 1].sf);
mbmi->ref_frame[0] = LAST_FRAME;
mbmi->ref_frame[1] = NONE;
@@ -687,9 +687,9 @@
mbmi->mv[0].as_int = 0;
mbmi->interp_filter = BILINEAR;
- y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
+ y_sad = av1_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
if (y_sad_g < y_sad) {
- vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+ av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
&cm->frame_refs[GOLDEN_FRAME - 1].sf);
mbmi->ref_frame[0] = GOLDEN_FRAME;
mbmi->mv[0].as_int = 0;
@@ -698,7 +698,7 @@
x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
}
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
for (i = 1; i <= 2; ++i) {
struct macroblock_plane *p = &x->plane[i];
@@ -731,7 +731,7 @@
} else {
d = VP9_VAR_OFFS;
dp = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (xd->bd) {
case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
@@ -740,7 +740,7 @@
default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
// Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
@@ -762,7 +762,7 @@
variance4x4downsample[i2 + j] = 0;
if (!is_key_frame) {
fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->cur_buf->flags,
#endif
pixels_wide, pixels_high, is_key_frame);
@@ -781,7 +781,7 @@
// compute the minmax over the 8x8 sub-blocks, and if above threshold,
// force split to 8x8 block for this 16x16 block.
int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->cur_buf->flags,
#endif
pixels_wide, pixels_high);
@@ -803,7 +803,7 @@
int y8_idx = y16_idx + ((k >> 1) << 3);
v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->cur_buf->flags,
#endif
pixels_wide, pixels_high, is_key_frame);
@@ -891,11 +891,11 @@
return 0;
}
-static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
+static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, BLOCK_SIZE bsize,
int output_enabled) {
int i, x_idx, y;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RD_COUNTS *const rdc = &td->rd_counts;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -933,7 +933,7 @@
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+ av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
bsize, ctx->rate, ctx->dist, x->skip);
}
}
@@ -964,7 +964,7 @@
xd->mi[x_idx + y * mis] = mi_addr;
}
- if (cpi->oxcf.aq_mode) vp10_init_plane_quantizers(cpi, x);
+ if (cpi->oxcf.aq_mode) av1_init_plane_quantizers(cpi, x);
if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -994,10 +994,10 @@
#endif
if (!frame_is_intra_only(cm)) {
if (is_inter_block(mbmi)) {
- vp10_update_mv_count(td);
+ av1_update_mv_count(td);
if (cm->interp_filter == SWITCHABLE) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
++td->counts->switchable_interp[ctx][mbmi->interp_filter];
}
}
@@ -1022,7 +1022,7 @@
}
}
-void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
+void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
@@ -1037,21 +1037,21 @@
x->e_mbd.plane[i].subsampling_y);
}
-static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+static int set_segment_rdmult(AV1_COMP *const cpi, MACROBLOCK *const x,
int8_t segment_id) {
int segment_qindex;
- VP10_COMMON *const cm = &cpi->common;
- vp10_init_plane_quantizers(cpi, x);
+ AV1_COMMON *const cm = &cpi->common;
+ av1_init_plane_quantizers(cpi, x);
aom_clear_system_state();
- segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
- return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
+ segment_qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ return av1_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
-static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+static void rd_pick_sb_modes(AV1_COMP *cpi, TileDataEnc *tile_data,
MACROBLOCK *const x, int mi_row, int mi_col,
RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
@@ -1086,28 +1086,28 @@
// Set to zero to make sure we do not use the previous encoded frame stats
mbmi->skip = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->source_variance = vp10_high_get_sby_perpixel_variance(
+ x->source_variance = av1_high_get_sby_perpixel_variance(
cpi, &x->plane[0].src, bsize, xd->bd);
} else {
x->source_variance =
- vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+ av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
}
#else
x->source_variance =
- vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Save rdmult before it might be changed, so it can be restored later.
orig_rdmult = x->rdmult;
if (aq_mode == VARIANCE_AQ) {
const int energy =
- bsize <= BLOCK_16X16 ? x->mb_energy : vp10_block_energy(cpi, x, bsize);
+ bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize);
if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
- mbmi->segment_id = vp10_vaq_segment_id(energy);
+ mbmi->segment_id = av1_vaq_segment_id(energy);
} else {
const uint8_t *const map =
cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
@@ -1122,23 +1122,23 @@
// If segment is boosted, use rdmult for that segment.
if (cyclic_refresh_segment_id_boosted(
get_segment_id(cm, map, bsize, mi_row, mi_col)))
- x->rdmult = vp10_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
+ x->rdmult = av1_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
}
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
if (frame_is_intra_only(cm)) {
- vp10_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+ av1_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
} else {
if (bsize >= BLOCK_8X8) {
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
- vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
+ av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
ctx, best_rd);
else
- vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ av1_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
bsize, ctx, best_rd);
} else {
- vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ av1_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
bsize, ctx, best_rd);
}
}
@@ -1148,7 +1148,7 @@
(bsize >= BLOCK_16X16) &&
(cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
- vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
+ av1_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
}
x->rdmult = orig_rdmult;
@@ -1161,7 +1161,7 @@
ctx->dist = rd_cost->dist;
}
-static void update_stats(VP10_COMMON *cm, ThreadData *td) {
+static void update_stats(AV1_COMMON *cm, ThreadData *td) {
const MACROBLOCK *x = &td->mb;
const MACROBLOCKD *const xd = &x->e_mbd;
const MODE_INFO *const mi = xd->mi[0];
@@ -1175,24 +1175,24 @@
const int seg_ref_active =
segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME);
if (!seg_ref_active) {
- counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
+ counts->intra_inter[av1_get_intra_inter_context(xd)][inter_block]++;
// If the segment reference feature is enabled we have only a single
// reference frame allowed for the segment so exclude it from
// the reference frame counts used to work out probabilities.
if (inter_block) {
const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- counts->comp_inter[vp10_get_reference_mode_context(
+ counts->comp_inter[av1_get_reference_mode_context(
cm, xd)][has_second_ref(mbmi)]++;
if (has_second_ref(mbmi)) {
- counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+ counts->comp_ref[av1_get_pred_context_comp_ref_p(
cm, xd)][ref0 == GOLDEN_FRAME]++;
} else {
- counts->single_ref[vp10_get_pred_context_single_ref_p1(
+ counts->single_ref[av1_get_pred_context_single_ref_p1(
xd)][0][ref0 != LAST_FRAME]++;
if (ref0 != LAST_FRAME)
- counts->single_ref[vp10_get_pred_context_single_ref_p2(
+ counts->single_ref[av1_get_pred_context_single_ref_p2(
xd)][1][ref0 != GOLDEN_FRAME]++;
}
}
@@ -1277,7 +1277,7 @@
sizeof(xd->left_seg_context[0]) * mi_height);
}
-static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
+static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx) {
@@ -1291,11 +1291,11 @@
}
}
-static void encode_sb(VP10_COMP *cpi, ThreadData *td,
+static void encode_sb(AV1_COMP *cpi, ThreadData *td,
const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
int mi_col, int output_enabled, BLOCK_SIZE bsize,
PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1402,10 +1402,10 @@
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
-static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_fixed_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
MODE_INFO **mi_8x8, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
@@ -1433,12 +1433,12 @@
}
}
-static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, MODE_INFO **mi_8x8,
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon, PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1463,9 +1463,9 @@
assert(num_4x4_blocks_wide_lookup[bsize] ==
num_4x4_blocks_high_lookup[bsize]);
- vp10_rd_cost_reset(&last_part_rdc);
- vp10_rd_cost_reset(&none_rdc);
- vp10_rd_cost_reset(&chosen_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&none_rdc);
+ av1_rd_cost_reset(&chosen_rdc);
partition = partition_lookup[bsl][bs_type];
subsize = get_subsize(bsize, partition);
@@ -1475,7 +1475,7 @@
if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
- x->mb_energy = vp10_block_energy(cpi, x, bsize);
+ x->mb_energy = av1_block_energy(cpi, x, bsize);
}
if (do_partition_search &&
@@ -1529,13 +1529,13 @@
mi_row + (mi_step >> 1) < cm->mi_rows) {
RD_COST tmp_rdc;
PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
- vp10_rd_cost_init(&tmp_rdc);
+ av1_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
&tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
break;
}
last_part_rdc.rate += tmp_rdc.rate;
@@ -1550,14 +1550,14 @@
mi_col + (mi_step >> 1) < cm->mi_cols) {
RD_COST tmp_rdc;
PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
- vp10_rd_cost_init(&tmp_rdc);
+ av1_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1),
&tmp_rdc, subsize,
&pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
break;
}
last_part_rdc.rate += tmp_rdc.rate;
@@ -1582,13 +1582,13 @@
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
- vp10_rd_cost_init(&tmp_rdc);
+ av1_rd_cost_init(&tmp_rdc);
rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
tp, mi_row + y_idx, mi_col + x_idx, subsize,
&tmp_rdc.rate, &tmp_rdc.dist, i != 3,
pc_tree->split[i]);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&last_part_rdc);
+ av1_rd_cost_reset(&last_part_rdc);
break;
}
last_part_rdc.rate += tmp_rdc.rate;
@@ -1638,7 +1638,7 @@
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
- vp10_rd_cost_reset(&chosen_rdc);
+ av1_rd_cost_reset(&chosen_rdc);
break;
}
@@ -1740,11 +1740,11 @@
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
-static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
+static void rd_auto_partition_range(AV1_COMP *cpi, const TileInfo *const tile,
MACROBLOCKD *const xd, int mi_row,
int mi_col, BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MODE_INFO **mi = xd->mi;
const int left_in_image = xd->left_available && mi[-1];
const int above_in_image = xd->up_available && mi[-xd->mi_stride];
@@ -1795,7 +1795,7 @@
// Test for blocks at the edge of the active image.
// This may be the actual edge of the image or where there are formatting
// bars.
- if (vp10_active_edge_sb(cpi, mi_row, mi_col)) {
+ if (av1_active_edge_sb(cpi, mi_row, mi_col)) {
min_size = BLOCK_4X4;
} else {
min_size =
@@ -1815,7 +1815,7 @@
}
// TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
int mi_col, BLOCK_SIZE bsize,
BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -1927,12 +1927,12 @@
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
-static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, TOKENEXTRA **tp,
int mi_row, int mi_col, BLOCK_SIZE bsize,
RD_COST *rd_cost, int64_t best_rd,
PC_TREE *pc_tree) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1971,15 +1971,15 @@
assert(num_8x8_blocks_wide_lookup[bsize] ==
num_8x8_blocks_high_lookup[bsize]);
- vp10_rd_cost_init(&this_rdc);
- vp10_rd_cost_init(&sum_rdc);
- vp10_rd_cost_reset(&best_rdc);
+ av1_rd_cost_init(&this_rdc);
+ av1_rd_cost_init(&sum_rdc);
+ av1_rd_cost_reset(&best_rdc);
best_rdc.rdcost = best_rd;
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
- x->mb_energy = vp10_block_energy(cpi, x, bsize);
+ x->mb_energy = av1_block_energy(cpi, x, bsize);
if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
int cb_partition_search_ctrl =
@@ -2215,7 +2215,7 @@
// PARTITION_HORZ
if (partition_horz_allowed &&
- (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
+ (do_rect || av1_active_h_edge(cpi, mi_row, mi_step))) {
subsize = get_subsize(bsize, PARTITION_HORZ);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
@@ -2259,7 +2259,7 @@
}
// PARTITION_VERT
if (partition_vert_allowed &&
- (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
+ (do_rect || av1_active_v_edge(cpi, mi_col, mi_step))) {
subsize = get_subsize(bsize, PARTITION_VERT);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
@@ -2325,10 +2325,10 @@
}
}
-static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td,
TileDataEnc *tile_data, int mi_row,
TOKENEXTRA **tp) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2363,7 +2363,7 @@
}
}
- vp10_zero(x->pred_mv);
+ av1_zero(x->pred_mv);
td->pc_root->index = 0;
if (seg->enabled) {
@@ -2406,16 +2406,16 @@
}
}
-static void init_encode_frame_mb_context(VP10_COMP *cpi) {
+static void init_encode_frame_mb_context(AV1_COMP *cpi) {
MACROBLOCK *const x = &cpi->td.mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
// Copy data over into macro block data structures.
- vp10_setup_src_planes(x, cpi->Source, 0, 0);
+ av1_setup_src_planes(x, cpi->Source, 0, 0);
- vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
@@ -2425,7 +2425,7 @@
sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
-static int check_dual_ref_flags(VP10_COMP *cpi) {
+static int check_dual_ref_flags(AV1_COMP *cpi) {
const int ref_flags = cpi->ref_frame_flags;
if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
@@ -2436,7 +2436,7 @@
}
}
-static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) {
+static void reset_skip_tx_size(AV1_COMMON *cm, TX_SIZE max_tx_size) {
int mi_row, mi_col;
const int mis = cm->mi_stride;
MODE_INFO **mi_ptr = cm->mi_grid_visible;
@@ -2449,7 +2449,7 @@
}
}
-static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
+static MV_REFERENCE_FRAME get_frame_type(const AV1_COMP *cpi) {
if (frame_is_intra_only(&cpi->common))
return INTRA_FRAME;
else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
@@ -2460,7 +2460,7 @@
return LAST_FRAME;
}
-static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
+static TX_MODE select_tx_mode(const AV1_COMP *cpi, MACROBLOCKD *const xd) {
if (xd->lossless[0]) return ONLY_4X4;
if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
return ALLOW_32X32;
@@ -2471,8 +2471,8 @@
return cpi->common.tx_mode;
}
-void vp10_init_tile_data(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_init_tile_data(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
int tile_col, tile_row;
@@ -2503,7 +2503,7 @@
for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
TileInfo *tile_info =
&cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
- vp10_tile_init(tile_info, cm, tile_row, tile_col);
+ av1_tile_init(tile_info, cm, tile_row, tile_col);
cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
pre_tok = cpi->tile_tok[tile_row][tile_col];
@@ -2512,9 +2512,9 @@
}
}
-void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
+void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
int tile_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
const TileInfo *const tile_info = &this_tile->tile_info;
@@ -2535,22 +2535,22 @@
allocated_tokens(*tile_info));
}
-static void encode_tiles(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void encode_tiles(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
int tile_col, tile_row;
- vp10_init_tile_data(cpi);
+ av1_init_tile_data(cpi);
for (tile_row = 0; tile_row < tile_rows; ++tile_row)
for (tile_col = 0; tile_col < tile_cols; ++tile_col)
- vp10_encode_tile(cpi, &cpi->td, tile_row, tile_col);
+ av1_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}
#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
- VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
+ AV1_COMMON *cm, uint8_t **this_frame_mb_stats) {
uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
cm->current_video_frame * cm->MBs * sizeof(uint8_t);
@@ -2562,10 +2562,10 @@
}
#endif
-static void encode_frame_internal(VP10_COMP *cpi) {
+static void encode_frame_internal(AV1_COMP *cpi) {
ThreadData *const td = &cpi->td;
MACROBLOCK *const x = &td->mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
RD_COUNTS *const rdc = &cpi->td.rd_counts;
int i;
@@ -2573,16 +2573,16 @@
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
- vp10_zero(*td->counts);
- vp10_zero(rdc->coef_counts);
- vp10_zero(rdc->comp_pred_diff);
- vp10_zero(rdc->filter_diff);
+ av1_zero(*td->counts);
+ av1_zero(rdc->coef_counts);
+ av1_zero(rdc->comp_pred_diff);
+ av1_zero(rdc->filter_diff);
rdc->m_search_count = 0; // Count of motion search hits.
rdc->ex_search_count = 0; // Exhaustive mesh search hits.
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
- ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
: cm->base_qindex;
xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2592,10 +2592,10 @@
cm->tx_mode = select_tx_mode(cpi, xd);
- vp10_frame_init_quantizer(cpi);
+ av1_frame_init_quantizer(cpi);
- vp10_initialize_rd_consts(cpi);
- vp10_initialize_me_consts(cpi, x, cm->base_qindex);
+ av1_initialize_rd_consts(cpi);
+ av1_initialize_me_consts(cpi, x, cm->base_qindex);
init_encode_frame_mb_context(cpi);
cm->use_prev_frame_mvs =
!cm->error_resilient_mode && cm->width == cm->last_width &&
@@ -2606,7 +2606,7 @@
cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
x->quant_fp = cpi->sf.use_quant_fp;
- vp10_zero(x->skip_txfm);
+ av1_zero(x->skip_txfm);
{
struct aom_usec_timer emr_timer;
@@ -2621,7 +2621,7 @@
// If allowed, encoding tiles in parallel with one thread handling one tile.
if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
- vp10_encode_tiles_mt(cpi);
+ av1_encode_tiles_mt(cpi);
else
encode_tiles(cpi);
@@ -2651,8 +2651,8 @@
}
}
-void vp10_encode_frame(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_encode_frame(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
// In the longer term the encoder should be generalized to match the
// decoder such that we allow compound where one of the 3 buffers has a
@@ -2728,10 +2728,10 @@
if (comp_count_zero == 0) {
cm->reference_mode = SINGLE_REFERENCE;
- vp10_zero(counts->comp_inter);
+ av1_zero(counts->comp_inter);
} else if (single_count_zero == 0) {
cm->reference_mode = COMPOUND_REFERENCE;
- vp10_zero(counts->comp_inter);
+ av1_zero(counts->comp_inter);
}
}
@@ -2791,8 +2791,8 @@
const int bidx = idy * 2 + idx;
const PREDICTION_MODE bmode = mi->bmi[bidx].as_mode;
if (intraonly) {
- const PREDICTION_MODE a = vp10_above_block_mode(mi, above_mi, bidx);
- const PREDICTION_MODE l = vp10_left_block_mode(mi, left_mi, bidx);
+ const PREDICTION_MODE a = av1_above_block_mode(mi, above_mi, bidx);
+ const PREDICTION_MODE l = av1_left_block_mode(mi, left_mi, bidx);
++counts->kf_y_mode[a][l][bmode];
} else {
++counts->y_mode[0][bmode];
@@ -2800,8 +2800,8 @@
}
} else {
if (intraonly) {
- const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, 0);
- const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, 0);
+ const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, 0);
+ const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, 0);
++counts->kf_y_mode[above][left][y_mode];
} else {
++counts->y_mode[size_group_lookup[bsize]][y_mode];
@@ -2811,10 +2811,10 @@
++counts->uv_mode[y_mode][uv_mode];
}
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO **mi_8x8 = xd->mi;
@@ -2841,11 +2841,11 @@
int plane;
mbmi->skip = 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
- vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane);
+ av1_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane);
if (output_enabled)
sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi,
frame_is_intra_only(cm));
- vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+ av1_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
} else {
int ref;
const int is_compound = has_second_ref(mbmi);
@@ -2853,18 +2853,18 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
assert(cfg != NULL);
- vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
+ av1_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
&xd->block_refs[ref]->sf);
}
if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
- vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
+ av1_build_inter_predictors_sby(xd, mi_row, mi_col,
VPXMAX(bsize, BLOCK_8X8));
- vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col,
+ av1_build_inter_predictors_sbuv(xd, mi_row, mi_col,
VPXMAX(bsize, BLOCK_8X8));
- vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
- vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+ av1_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
+ av1_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
}
if (output_enabled) {
diff --git a/av1/encoder/encodeframe.h b/av1/encoder/encodeframe.h
index 2b70d73..59936db 100644
--- a/av1/encoder/encodeframe.h
+++ b/av1/encoder/encodeframe.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_ENCODEFRAME_H_
-#define VP10_ENCODER_ENCODEFRAME_H_
+#ifndef AV1_ENCODER_ENCODEFRAME_H_
+#define AV1_ENCODER_ENCODEFRAME_H_
#include "aom/aom_integer.h"
@@ -20,7 +20,7 @@
struct macroblock;
struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
struct ThreadData;
// Constants used in SOURCE_VAR_BASED_PARTITION
@@ -30,20 +30,20 @@
#define VAR_HIST_LARGE_CUT_OFF 75
#define VAR_HIST_SMALL_CUT_OFF 45
-void vp10_setup_src_planes(struct macroblock *x,
+void av1_setup_src_planes(struct macroblock *x,
const struct yv12_buffer_config *src, int mi_row,
int mi_col);
-void vp10_encode_frame(struct VP10_COMP *cpi);
+void av1_encode_frame(struct AV1_COMP *cpi);
-void vp10_init_tile_data(struct VP10_COMP *cpi);
-void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
+void av1_init_tile_data(struct AV1_COMP *cpi);
+void av1_encode_tile(struct AV1_COMP *cpi, struct ThreadData *td,
int tile_row, int tile_col);
-void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
+void av1_set_variance_partition_thresholds(struct AV1_COMP *cpi, int q);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODEFRAME_H_
+#endif // AV1_ENCODER_ENCODEFRAME_H_
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index a7d5f50..c2d4ae8 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -31,21 +31,21 @@
ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
};
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
p->src.stride, pd->dst.buf, pd->dst.stride,
x->e_mbd.bd);
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
pd->dst.buf, pd->dst.stride);
}
@@ -54,13 +54,13 @@
(((1 << (VP9_PROB_COST_SHIFT - 1)) + (R) * (RM)) & \
((1 << VP9_PROB_COST_SHIFT) - 1))
-typedef struct vp10_token_state {
+typedef struct av1_token_state {
int rate;
int error;
int next;
int16_t token;
short qc;
-} vp10_token_state;
+} av1_token_state;
// TODO(jimbankoski): experiment to find optimal RD numbers.
static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 };
@@ -80,7 +80,7 @@
static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb,
int idx, int token, uint8_t *token_cache) {
int bak = token_cache[scan[idx]], pt;
- token_cache[scan[idx]] = vp10_pt_energy_class[token];
+ token_cache[scan[idx]] = av1_pt_energy_class[token];
pt = get_coef_context(nb, token_cache, idx + 1);
token_cache[scan[idx]] = bak;
return pt;
@@ -92,7 +92,7 @@
struct macroblock_plane *const p = &mb->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
const int ref = is_inter_block(&xd->mi[0]->mbmi);
- vp10_token_state tokens[1025][2];
+ av1_token_state tokens[1025][2];
unsigned best_index[1025][2];
uint8_t token_cache[1024];
const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
@@ -120,10 +120,10 @@
int16_t t0, t1;
EXTRABIT e0;
int best, band, pt, i, final_eob;
-#if CONFIG_VPX_HIGHBITDEPTH
- const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+ const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
#else
- const int *cat6_high_cost = vp10_get_high_cost_table(8);
+ const int *cat6_high_cost = av1_get_high_cost_table(8);
#endif
assert((!type && !plane) || (type && plane));
@@ -142,7 +142,7 @@
for (i = 0; i < eob; i++)
token_cache[scan[i]] =
- vp10_pt_energy_class[vp10_get_token(qcoeff[scan[i]])];
+ av1_pt_energy_class[av1_get_token(qcoeff[scan[i]])];
for (i = eob; i-- > 0;) {
int base_bits, d2, dx;
@@ -160,7 +160,7 @@
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- vp10_get_token_extra(x, &t0, &e0);
+ av1_get_token_extra(x, &t0, &e0);
/* Consider both possible successor states. */
if (next < default_eob) {
band = band_translate[i + 1];
@@ -175,13 +175,13 @@
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = vp10_get_cost(t0, e0, cat6_high_cost);
+ base_bits = av1_get_cost(t0, e0, cat6_high_cost);
dx = mul * (dqcoeff[rc] - coeff[rc]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
dx >>= xd->bd - 8;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
d2 = dx * dx;
tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
tokens[i][0].error = d2 + (best ? error1 : error0);
@@ -222,7 +222,7 @@
t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
e0 = 0;
} else {
- vp10_get_token_extra(x, &t0, &e0);
+ av1_get_token_extra(x, &t0, &e0);
t1 = t0;
}
if (next < default_eob) {
@@ -244,10 +244,10 @@
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = vp10_get_cost(t0, e0, cat6_high_cost);
+ base_bits = av1_get_cost(t0, e0, cat6_high_cost);
if (shortcut) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
} else {
@@ -255,7 +255,7 @@
}
#else
dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
d2 = dx * dx;
}
@@ -341,7 +341,7 @@
aom_fdct32x32(src, dst, src_stride);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_fdct32x32(int rd_transform, const int16_t *src,
tran_low_t *dst, int src_stride) {
if (rd_transform)
@@ -349,12 +349,12 @@
else
aom_highbd_fdct32x32(src, dst, src_stride);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+void av1_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type, int lossless) {
if (lossless) {
- vp10_fwht4x4(src_diff, coeff, diff_stride);
+ av1_fwht4x4(src_diff, coeff, diff_stride);
} else {
switch (tx_type) {
case DCT_DCT:
@@ -363,7 +363,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_fht4x4(src_diff, coeff, diff_stride, tx_type);
+ av1_fht4x4(src_diff, coeff, diff_stride, tx_type);
break;
default:
assert(0);
@@ -379,7 +379,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
+ av1_fht8x8(src_diff, coeff, diff_stride, tx_type);
break;
default:
assert(0);
@@ -394,7 +394,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
+ av1_fht16x16(src_diff, coeff, diff_stride, tx_type);
break;
default:
assert(0);
@@ -420,12 +420,12 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type, int lossless) {
if (lossless) {
assert(tx_type == DCT_DCT);
- vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+ av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
} else {
switch (tx_type) {
case DCT_DCT:
@@ -434,7 +434,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
break;
default:
assert(0);
@@ -452,7 +452,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
break;
default:
assert(0);
@@ -469,7 +469,7 @@
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
- vp10_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
+ av1_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
break;
default:
assert(0);
@@ -494,9 +494,9 @@
break;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
@@ -518,12 +518,12 @@
const int16_t *src_diff;
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
- vp10_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
+ av1_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round_fp, p->quant_fp, p->quant_shift,
qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan,
@@ -535,7 +535,7 @@
break;
case TX_16X16:
aom_highbd_fdct16x16(src_diff, coeff, diff_stride);
- vp10_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
+ av1_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -546,7 +546,7 @@
break;
case TX_8X8:
aom_highbd_fdct8x8(src_diff, coeff, diff_stride);
- vp10_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
+ av1_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -557,11 +557,11 @@
break;
case TX_4X4:
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+ av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
} else {
aom_highbd_fdct4x4(src_diff, coeff, diff_stride);
}
- vp10_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
+ av1_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -575,12 +575,12 @@
}
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_32X32:
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
- vp10_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
+ av1_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -591,7 +591,7 @@
break;
case TX_16X16:
aom_fdct16x16(src_diff, coeff, diff_stride);
- vp10_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
+ av1_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -601,7 +601,7 @@
#endif
break;
case TX_8X8:
- vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
+ av1_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
p->zbin, p->round_fp, p->quant_fp, p->quant_shift,
qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -612,11 +612,11 @@
break;
case TX_4X4:
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- vp10_fwht4x4(src_diff, coeff, diff_stride);
+ av1_fwht4x4(src_diff, coeff, diff_stride);
} else {
aom_fdct4x4(src_diff, coeff, diff_stride);
}
- vp10_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
+ av1_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
#if !CONFIG_AOM_QM
@@ -631,7 +631,7 @@
}
}
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
@@ -650,7 +650,7 @@
const int16_t *src_diff;
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
@@ -686,7 +686,7 @@
break;
case TX_4X4:
if (xd->lossless[seg_id]) {
- vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+ av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
} else {
aom_highbd_fdct4x4(src_diff, coeff, diff_stride);
}
@@ -703,7 +703,7 @@
}
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_32X32:
@@ -738,7 +738,7 @@
break;
case TX_4X4:
if (xd->lossless[seg_id]) {
- vp10_fwht4x4(src_diff, coeff, diff_stride);
+ av1_fwht4x4(src_diff, coeff, diff_stride);
} else {
aom_fdct4x4(src_diff, coeff, diff_stride);
}
@@ -756,7 +756,7 @@
}
}
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
@@ -778,7 +778,7 @@
const int16_t *src_diff;
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
@@ -816,7 +816,7 @@
#endif
break;
case TX_4X4:
- vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+ av1_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[seg_id]);
aom_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
@@ -832,7 +832,7 @@
}
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_32X32:
@@ -869,7 +869,7 @@
#endif
break;
case TX_4X4:
- vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+ av1_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[seg_id]);
aom_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
@@ -919,7 +919,7 @@
*a = *l = 0;
return;
} else {
- vp10_xform_quant_fp(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_fp(x, plane, block, blk_row, blk_col, plane_bsize,
tx_size);
}
} else {
@@ -927,11 +927,11 @@
int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1));
if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) {
// full forward transform and quantization
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
tx_size);
} else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) {
// fast path forward transform and quantization
- vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
tx_size);
} else {
// skip forward transform
@@ -940,7 +940,7 @@
return;
}
} else {
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
tx_size);
}
}
@@ -956,26 +956,26 @@
if (p->eobs[block]) *(args->skip) = 0;
if (p->eobs[block] == 0) return;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride,
+ av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride,
p->eobs[block], xd->bd, tx_type);
break;
case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride,
+ av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride,
p->eobs[block], xd->bd, tx_type);
break;
case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride,
+ av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride,
p->eobs[block], xd->bd, tx_type);
break;
case TX_4X4:
- // this is like vp10_short_idct4x4 but has a special case around eob<=1
+ // this is like av1_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride,
+ av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride,
p->eobs[block], xd->bd, tx_type,
xd->lossless[xd->mi[0]->mbmi.segment_id]);
break;
@@ -986,26 +986,26 @@
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_32X32:
- vp10_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ av1_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride, p->eobs[block],
tx_type);
break;
case TX_16X16:
- vp10_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ av1_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride, p->eobs[block],
tx_type);
break;
case TX_8X8:
- vp10_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ av1_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride, p->eobs[block],
tx_type);
break;
case TX_4X4:
- // this is like vp10_short_idct4x4 but has a special case around eob<=1
+ // this is like av1_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp10_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ av1_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block],
tx_type, xd->lossless[xd->mi[0]->mbmi.segment_id]);
break;
default:
@@ -1025,36 +1025,36 @@
uint8_t *dst;
dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
if (p->eobs[block] > 0) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
if (xd->lossless[0]) {
- vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ av1_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
xd->bd);
} else {
- vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ av1_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
xd->bd);
}
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (xd->lossless[0]) {
- vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+ av1_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
} else {
- vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+ av1_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
}
}
}
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
- vp10_subtract_plane(x, bsize, 0);
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ av1_subtract_plane(x, bsize, 0);
+ av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
encode_block_pass1, x);
}
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -1066,21 +1066,21 @@
if (x->skip) return;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- if (!x->skip_recode) vp10_subtract_plane(x, bsize, plane);
+ if (!x->skip_recode) av1_subtract_plane(x, bsize, plane);
if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
+ av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
ctx.tl[plane]);
}
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
&arg);
}
}
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
void *arg) {
struct encode_b_args *const args = arg;
@@ -1115,10 +1115,10 @@
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
- vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+ av1_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
dst_stride, blk_col, blk_row, plane);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
@@ -1138,7 +1138,7 @@
#endif
}
if (*eob)
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, xd->bd,
+ av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, xd->bd,
tx_type);
break;
case TX_16X16:
@@ -1156,7 +1156,7 @@
#endif
}
if (*eob)
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd,
+ av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd,
tx_type);
break;
case TX_8X8:
@@ -1174,14 +1174,14 @@
#endif
}
if (*eob)
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd,
+ av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd,
tx_type);
break;
case TX_4X4:
if (!x->skip_recode) {
aom_highbd_subtract_block(4, 4, src_diff, diff_stride, src,
src_stride, dst, dst_stride, xd->bd);
- vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+ av1_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[seg_id]);
aom_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
@@ -1194,10 +1194,10 @@
}
if (*eob)
- // this is like vp10_short_idct4x4 but has a special case around
+ // this is like av1_short_idct4x4 but has a special case around
// eob<=1 which is significant (not just an optimization) for the
// lossless case.
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd,
+ av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd,
tx_type, xd->lossless[seg_id]);
break;
default:
@@ -1207,7 +1207,7 @@
if (*eob) *(args->skip) = 0;
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
switch (tx_size) {
case TX_32X32:
@@ -1226,7 +1226,7 @@
#endif
}
if (*eob)
- vp10_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type);
+ av1_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type);
break;
case TX_16X16:
if (!x->skip_recode) {
@@ -1243,7 +1243,7 @@
#endif
}
if (*eob)
- vp10_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
+ av1_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
break;
case TX_8X8:
if (!x->skip_recode) {
@@ -1259,13 +1259,13 @@
scan_order->iscan, qmatrix, iqmatrix);
#endif
}
- if (*eob) vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
+ if (*eob) av1_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
break;
case TX_4X4:
if (!x->skip_recode) {
aom_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst,
dst_stride);
- vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+ av1_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[seg_id]);
aom_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
@@ -1278,10 +1278,10 @@
}
if (*eob) {
- // this is like vp10_short_idct4x4 but has a special case around eob<=1
+ // this is like av1_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp10_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type,
+ av1_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type,
xd->lossless[seg_id]);
}
break;
@@ -1292,10 +1292,10 @@
if (*eob) *(args->skip) = 0;
}
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
const MACROBLOCKD *const xd = &x->e_mbd;
struct encode_b_args arg = { x, NULL, &xd->mi[0]->mbmi.skip };
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
- vp10_encode_block_intra, &arg);
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane,
+ av1_encode_block_intra, &arg);
}
diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h
index cd3b677..f0ebaea 100644
--- a/av1/encoder/encodemb.h
+++ b/av1/encoder/encodemb.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_ENCODEMB_H_
-#define VP10_ENCODER_ENCODEMB_H_
+#ifndef AV1_ENCODER_ENCODEMB_H_
+#define AV1_ENCODER_ENCODEMB_H_
#include "./aom_config.h"
#include "av1/encoder/block.h"
@@ -24,33 +24,33 @@
struct optimize_ctx *ctx;
int8_t *skip;
};
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
void *arg);
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+void av1_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type, int lossless);
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TX_TYPE tx_type, int lossless);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODEMB_H_
+#endif // AV1_ENCODER_ENCODEMB_H_
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
index 5cf3ad8..25c577e 100644
--- a/av1/encoder/encodemv.c
+++ b/av1/encoder/encodemv.c
@@ -20,16 +20,16 @@
#include "aom_dsp/aom_dsp_common.h"
-static struct vp10_token mv_joint_encodings[MV_JOINTS];
-static struct vp10_token mv_class_encodings[MV_CLASSES];
-static struct vp10_token mv_fp_encodings[MV_FP_SIZE];
-static struct vp10_token mv_class0_encodings[CLASS0_SIZE];
+static struct av1_token mv_joint_encodings[MV_JOINTS];
+static struct av1_token mv_class_encodings[MV_CLASSES];
+static struct av1_token mv_fp_encodings[MV_FP_SIZE];
+static struct av1_token mv_class0_encodings[CLASS0_SIZE];
-void vp10_entropy_mv_init(void) {
- vp10_tokens_from_tree(mv_joint_encodings, vp10_mv_joint_tree);
- vp10_tokens_from_tree(mv_class_encodings, vp10_mv_class_tree);
- vp10_tokens_from_tree(mv_class0_encodings, vp10_mv_class0_tree);
- vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
+void av1_entropy_mv_init(void) {
+ av1_tokens_from_tree(mv_joint_encodings, av1_mv_joint_tree);
+ av1_tokens_from_tree(mv_class_encodings, av1_mv_class_tree);
+ av1_tokens_from_tree(mv_class0_encodings, av1_mv_class0_tree);
+ av1_tokens_from_tree(mv_fp_encodings, av1_mv_fp_tree);
}
static void encode_mv_component(aom_writer *w, int comp,
@@ -37,7 +37,7 @@
int offset;
const int sign = comp < 0;
const int mag = sign ? -comp : comp;
- const int mv_class = vp10_get_mv_class(mag - 1, &offset);
+ const int mv_class = av1_get_mv_class(mag - 1, &offset);
const int d = offset >> 3; // int mv data
const int fr = (offset >> 1) & 3; // fractional mv data
const int hp = offset & 1; // high precision mv data
@@ -48,12 +48,12 @@
aom_write(w, sign, mvcomp->sign);
// Class
- vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
+ av1_write_token(w, av1_mv_class_tree, mvcomp->classes,
&mv_class_encodings[mv_class]);
// Integer bits
if (mv_class == MV_CLASS_0) {
- vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
+ av1_write_token(w, av1_mv_class0_tree, mvcomp->class0,
&mv_class0_encodings[d]);
} else {
int i;
@@ -62,7 +62,7 @@
}
// Fractional bits
- vp10_write_token(w, vp10_mv_fp_tree,
+ av1_write_token(w, av1_mv_fp_tree,
mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
&mv_fp_encodings[fr]);
@@ -80,30 +80,30 @@
int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE];
int class0_hp_cost[2], hp_cost[2];
- sign_cost[0] = vp10_cost_zero(mvcomp->sign);
- sign_cost[1] = vp10_cost_one(mvcomp->sign);
- vp10_cost_tokens(class_cost, mvcomp->classes, vp10_mv_class_tree);
- vp10_cost_tokens(class0_cost, mvcomp->class0, vp10_mv_class0_tree);
+ sign_cost[0] = av1_cost_zero(mvcomp->sign);
+ sign_cost[1] = av1_cost_one(mvcomp->sign);
+ av1_cost_tokens(class_cost, mvcomp->classes, av1_mv_class_tree);
+ av1_cost_tokens(class0_cost, mvcomp->class0, av1_mv_class0_tree);
for (i = 0; i < MV_OFFSET_BITS; ++i) {
- bits_cost[i][0] = vp10_cost_zero(mvcomp->bits[i]);
- bits_cost[i][1] = vp10_cost_one(mvcomp->bits[i]);
+ bits_cost[i][0] = av1_cost_zero(mvcomp->bits[i]);
+ bits_cost[i][1] = av1_cost_one(mvcomp->bits[i]);
}
for (i = 0; i < CLASS0_SIZE; ++i)
- vp10_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp10_mv_fp_tree);
- vp10_cost_tokens(fp_cost, mvcomp->fp, vp10_mv_fp_tree);
+ av1_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], av1_mv_fp_tree);
+ av1_cost_tokens(fp_cost, mvcomp->fp, av1_mv_fp_tree);
if (usehp) {
- class0_hp_cost[0] = vp10_cost_zero(mvcomp->class0_hp);
- class0_hp_cost[1] = vp10_cost_one(mvcomp->class0_hp);
- hp_cost[0] = vp10_cost_zero(mvcomp->hp);
- hp_cost[1] = vp10_cost_one(mvcomp->hp);
+ class0_hp_cost[0] = av1_cost_zero(mvcomp->class0_hp);
+ class0_hp_cost[1] = av1_cost_one(mvcomp->class0_hp);
+ hp_cost[0] = av1_cost_zero(mvcomp->hp);
+ hp_cost[1] = av1_cost_one(mvcomp->hp);
}
mvcost[0] = 0;
for (v = 1; v <= MV_MAX; ++v) {
int z, c, o, d, e, f, cost = 0;
z = v - 1;
- c = vp10_get_mv_class(z, &o);
+ c = av1_get_mv_class(z, &o);
cost += class_cost[c];
d = (o >> 3); /* int mv data */
f = (o >> 1) & 3; /* fractional pel mv data */
@@ -136,11 +136,11 @@
aom_prob upd_p) {
#if CONFIG_MISC_FIXES
(void)upd_p;
- vp10_cond_prob_diff_update(w, cur_p, ct);
+ av1_cond_prob_diff_update(w, cur_p, ct);
#else
const aom_prob new_p = get_binary_prob(ct[0], ct[1]) | 1;
- const int update = cost_branch256(ct, *cur_p) + vp10_cost_zero(upd_p) >
- cost_branch256(ct, new_p) + vp10_cost_one(upd_p) + 7 * 256;
+ const int update = cost_branch256(ct, *cur_p) + av1_cost_zero(upd_p) >
+ cost_branch256(ct, new_p) + av1_cost_one(upd_p) + 7 * 256;
aom_write(w, update, upd_p);
if (update) {
*cur_p = new_p;
@@ -159,17 +159,17 @@
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i)
update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB);
}
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
+void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
nmv_context_counts *const counts) {
int i, j;
nmv_context *const mvc = &cm->fc->nmvc;
- write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+ write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
w);
for (i = 0; i < 2; ++i) {
@@ -177,9 +177,9 @@
nmv_component_counts *comp_counts = &counts->comps[i];
update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
- write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+ write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
MV_CLASSES, w);
- write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+ write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
CLASS0_SIZE, w);
for (j = 0; j < MV_OFFSET_BITS; ++j)
update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
@@ -187,10 +187,10 @@
for (i = 0; i < 2; ++i) {
for (j = 0; j < CLASS0_SIZE; ++j)
- write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+ write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
- write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+ write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
MV_FP_SIZE, w);
}
@@ -203,13 +203,13 @@
}
}
-void vp10_encode_mv(VP10_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp) {
const MV diff = { mv->row - ref->row, mv->col - ref->col };
- const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
- usehp = usehp && vp10_use_mv_hp(ref);
+ const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);
+ usehp = usehp && av1_use_mv_hp(ref);
- vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
+ av1_write_token(w, av1_mv_joint_tree, mvctx->joints,
&mv_joint_encodings[j]);
if (mv_joint_vertical(j))
encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
@@ -225,9 +225,9 @@
}
}
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
const nmv_context *ctx, int usehp) {
- vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
+ av1_cost_tokens(mvjoint, ctx->joints, av1_mv_joint_tree);
build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
}
@@ -240,11 +240,11 @@
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
const MV diff = { mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col };
- vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
}
}
-void vp10_update_mv_count(ThreadData *td) {
+void av1_update_mv_count(ThreadData *td) {
const MACROBLOCKD *xd = &td->mb.e_mbd;
const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
diff --git a/av1/encoder/encodemv.h b/av1/encoder/encodemv.h
index 5b4bf5a..1c0d90f 100644
--- a/av1/encoder/encodemv.h
+++ b/av1/encoder/encodemv.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_ENCODEMV_H_
-#define VP10_ENCODER_ENCODEMV_H_
+#ifndef AV1_ENCODER_ENCODEMV_H_
+#define AV1_ENCODER_ENCODEMV_H_
#include "av1/encoder/encoder.h"
@@ -18,21 +18,21 @@
extern "C" {
#endif
-void vp10_entropy_mv_init(void);
+void av1_entropy_mv_init(void);
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
+void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
nmv_context_counts *const counts);
-void vp10_encode_mv(VP10_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp);
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
const nmv_context *mvctx, int usehp);
-void vp10_update_mv_count(ThreadData *td);
+void av1_update_mv_count(ThreadData *td);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODEMV_H_
+#endif // AV1_ENCODER_ENCODEMV_H_
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 8afb342..e84a99a 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -119,7 +119,7 @@
// Mark all inactive blocks as active. Other segmentation features may be set
// so memset cannot be used, instead only inactive blocks should be reset.
-static void suppress_active_map(VP10_COMP *cpi) {
+static void suppress_active_map(AV1_COMP *cpi) {
unsigned char *const seg_map = cpi->segmentation_map;
int i;
if (cpi->active_map.enabled || cpi->active_map.update)
@@ -128,7 +128,7 @@
seg_map[i] = AM_SEGMENT_ID_ACTIVE;
}
-static void apply_active_map(VP10_COMP *cpi) {
+static void apply_active_map(AV1_COMP *cpi) {
struct segmentation *const seg = &cpi->common.seg;
unsigned char *const seg_map = cpi->segmentation_map;
const unsigned char *const active_map = cpi->active_map.map;
@@ -145,16 +145,16 @@
if (cpi->active_map.enabled) {
for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
- vp10_enable_segmentation(seg);
- vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
- vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+ av1_enable_segmentation(seg);
+ av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+ av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
// Setting the data to -MAX_LOOP_FILTER will result in the computed loop
// filter level being zero regardless of the value of seg->abs_delta.
- vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+ av1_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
-MAX_LOOP_FILTER);
} else {
- vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
- vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+ av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+ av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
if (seg->enabled) {
seg->update_data = 1;
seg->update_map = 1;
@@ -164,7 +164,7 @@
}
}
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
unsigned char *const active_map_8x8 = cpi->active_map.map;
@@ -191,7 +191,7 @@
}
}
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
new_map_16x16) {
@@ -216,7 +216,7 @@
}
}
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv) {
MACROBLOCK *const mb = &cpi->td.mb;
cpi->common.allow_high_precision_mv = allow_high_precision_mv;
if (cpi->common.allow_high_precision_mv) {
@@ -228,15 +228,15 @@
}
}
-static void setup_frame(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void setup_frame(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
// Set up entropy context depending on frame type. The decoder mandates
// the use of the default context, index 0, for keyframes and inter
// frames where the error_resilient_mode or intra_only flag is set. For
// other inter-frames the encoder currently uses only two contexts;
// context 1 for ALTREF frames and context 0 for the others.
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
- vp10_setup_past_independence(cm);
+ av1_setup_past_independence(cm);
} else {
cm->frame_context_idx = cpi->refresh_alt_ref_frame;
}
@@ -244,14 +244,14 @@
if (cm->frame_type == KEY_FRAME) {
cpi->refresh_golden_frame = 1;
cpi->refresh_alt_ref_frame = 1;
- vp10_zero(cpi->interp_filter_selected);
+ av1_zero(cpi->interp_filter_selected);
} else {
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
- vp10_zero(cpi->interp_filter_selected[0]);
+ av1_zero(cpi->interp_filter_selected[0]);
}
}
-static void vp10_enc_setup_mi(VP10_COMMON *cm) {
+static void av1_enc_setup_mi(AV1_COMMON *cm) {
int i;
cm->mi = cm->mip + cm->mi_stride + 1;
memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
@@ -269,7 +269,7 @@
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
-static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
+static int av1_enc_alloc_mi(AV1_COMMON *cm, int mi_size) {
cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
cm->prev_mip = aom_calloc(mi_size, sizeof(*cm->prev_mip));
@@ -285,7 +285,7 @@
return 0;
}
-static void vp10_enc_free_mi(VP10_COMMON *cm) {
+static void av1_enc_free_mi(AV1_COMMON *cm) {
aom_free(cm->mip);
cm->mip = NULL;
aom_free(cm->prev_mip);
@@ -296,7 +296,7 @@
cm->prev_mi_grid_base = NULL;
}
-static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) {
+static void av1_swap_mi_and_prev_mi(AV1_COMMON *cm) {
// Current mip will be the prev_mip for the next frame.
MODE_INFO **temp_base = cm->prev_mi_grid_base;
MODE_INFO *temp = cm->prev_mip;
@@ -313,24 +313,24 @@
cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
}
-void vp10_initialize_enc(void) {
+void av1_initialize_enc(void) {
static volatile int init_done = 0;
if (!init_done) {
av1_rtcd();
aom_dsp_rtcd();
aom_scale_rtcd();
- vp10_init_intra_predictors();
- vp10_init_me_luts();
- vp10_rc_init_minq_luts();
- vp10_entropy_mv_init();
- vp10_encode_token_init();
+ av1_init_intra_predictors();
+ av1_init_me_luts();
+ av1_rc_init_minq_luts();
+ av1_entropy_mv_init();
+ av1_encode_token_init();
init_done = 1;
}
}
-static void dealloc_compressor_data(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void dealloc_compressor_data(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
aom_free(cpi->mbmi_ext_base);
cpi->mbmi_ext_base = NULL;
@@ -364,25 +364,25 @@
cpi->nmvsadcosts_hp[0] = NULL;
cpi->nmvsadcosts_hp[1] = NULL;
- vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+ av1_cyclic_refresh_free(cpi->cyclic_refresh);
cpi->cyclic_refresh = NULL;
aom_free(cpi->active_map.map);
cpi->active_map.map = NULL;
- vp10_free_ref_frame_buffers(cm->buffer_pool);
- vp10_free_context_buffers(cm);
+ av1_free_ref_frame_buffers(cm->buffer_pool);
+ av1_free_context_buffers(cm);
aom_free_frame_buffer(&cpi->last_frame_uf);
aom_free_frame_buffer(&cpi->scaled_source);
aom_free_frame_buffer(&cpi->scaled_last_source);
aom_free_frame_buffer(&cpi->alt_ref_buffer);
- vp10_lookahead_destroy(cpi->lookahead);
+ av1_lookahead_destroy(cpi->lookahead);
aom_free(cpi->tile_tok[0][0]);
cpi->tile_tok[0][0] = 0;
- vp10_free_pc_tree(&cpi->td);
+ av1_free_pc_tree(&cpi->td);
if (cpi->source_diff_var != NULL) {
aom_free(cpi->source_diff_var);
@@ -390,15 +390,15 @@
}
}
-static void save_coding_context(VP10_COMP *cpi) {
+static void save_coding_context(AV1_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
// Stores a snapshot of key state variables which can subsequently be
- // restored with a call to vp10_restore_coding_context. These functions are
- // intended for use in a re-code loop in vp10_compress_frame where the
+ // restored with a call to av1_restore_coding_context. These functions are
+ // intended for use in a re-code loop in av1_compress_frame where the
// quantizer value is adjusted between loop iterations.
- vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+ av1_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
@@ -410,25 +410,25 @@
MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
#if !CONFIG_MISC_FIXES
- vp10_copy(cc->segment_pred_probs, cm->segp.pred_probs);
+ av1_copy(cc->segment_pred_probs, cm->segp.pred_probs);
#endif
memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
(cm->mi_rows * cm->mi_cols));
- vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
- vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
+ av1_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
+ av1_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
cc->fc = *cm->fc;
}
-static void restore_coding_context(VP10_COMP *cpi) {
+static void restore_coding_context(AV1_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
// Restore key state variables to the snapshot state stored in the
- // previous call to vp10_save_coding_context.
- vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
+ // previous call to av1_save_coding_context.
+ av1_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
@@ -438,20 +438,20 @@
MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
#if !CONFIG_MISC_FIXES
- vp10_copy(cm->segp.pred_probs, cc->segment_pred_probs);
+ av1_copy(cm->segp.pred_probs, cc->segment_pred_probs);
#endif
memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
(cm->mi_rows * cm->mi_cols));
- vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
- vp10_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
+ av1_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
+ av1_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
*cm->fc = cc->fc;
}
-static void configure_static_seg_features(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void configure_static_seg_features(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
struct segmentation *const seg = &cm->seg;
@@ -467,10 +467,10 @@
cpi->static_mb_pct = 0;
// Disable segmentation
- vp10_disable_segmentation(seg);
+ av1_disable_segmentation(seg);
// Clear down the segment features.
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
} else if (cpi->refresh_alt_ref_frame) {
// If this is an alt ref frame
// Clear down the global segmentation map
@@ -480,12 +480,12 @@
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
- vp10_disable_segmentation(seg);
- vp10_clearall_segfeatures(seg);
+ av1_disable_segmentation(seg);
+ av1_clearall_segfeatures(seg);
// Scan frames from current to arf frame.
// This function re-enables segmentation if appropriate.
- vp10_update_mbgraph_stats(cpi);
+ av1_update_mbgraph_stats(cpi);
// If segmentation was enabled set those features needed for the
// arf itself.
@@ -494,12 +494,12 @@
seg->update_data = 1;
qi_delta =
- vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+ av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
// Where relevant assume segment data is delta data
seg->abs_delta = SEGMENT_DELTADATA;
@@ -515,32 +515,32 @@
seg->update_data = 1;
seg->abs_delta = SEGMENT_DELTADATA;
- qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
+ qi_delta = av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
cm->bit_depth);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
- vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
- vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+ av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+ av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
// Segment coding disabled for compred testing
if (high_q || (cpi->static_mb_pct == 100)) {
- vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
- vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
- vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+ av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+ av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
} else {
// Disable segmentation and clear down features if alt ref
// is not active for this group
- vp10_disable_segmentation(seg);
+ av1_disable_segmentation(seg);
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
}
} else if (rc->is_src_frame_alt_ref) {
// Special case where we are coding over the top of a previous
@@ -548,19 +548,19 @@
// Segment coding disabled for compred testing
// Enable ref frame features for segment 0 as well
- vp10_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
- vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+ av1_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
+ av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
// All mbs should use ALTREF_FRAME
- vp10_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
- vp10_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
- vp10_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
- vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ av1_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
+ av1_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ av1_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
+ av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
// Skip all MBs if high Q (0,0 mv and skip coeffs)
if (high_q) {
- vp10_enable_segfeature(seg, 0, SEG_LVL_SKIP);
- vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+ av1_enable_segfeature(seg, 0, SEG_LVL_SKIP);
+ av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
// Enable data update
seg->update_data = 1;
@@ -574,8 +574,8 @@
}
}
-static void update_reference_segmentation_map(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void update_reference_segmentation_map(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
uint8_t *cache_ptr = cm->last_frame_seg_map;
int row, col;
@@ -590,14 +590,14 @@
}
}
-static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static void alloc_raw_frame_buffers(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
if (!cpi->lookahead)
- cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
+ cpi->lookahead = av1_lookahead_init(oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
oxcf->lag_in_frames);
@@ -608,7 +608,7 @@
// TODO(agrange) Check if ARF is enabled and skip allocation if not.
if (aom_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -617,11 +617,11 @@
"Failed to allocate altref buffer");
}
-static void alloc_util_frame_buffers(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void alloc_util_frame_buffers(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
if (aom_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -631,7 +631,7 @@
if (aom_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -641,7 +641,7 @@
if (aom_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -650,8 +650,8 @@
"Failed to allocate scaled last source buffer");
}
-static int alloc_context_buffers_ext(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+static int alloc_context_buffers_ext(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
int mi_size = cm->mi_cols * cm->mi_rows;
cpi->mbmi_ext_base = aom_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
@@ -660,10 +660,10 @@
return 0;
}
-void vp10_alloc_compressor_data(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+void av1_alloc_compressor_data(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
- vp10_alloc_context_buffers(cm, cm->width, cm->height);
+ av1_alloc_context_buffers(cm, cm->width, cm->height);
alloc_context_buffers_ext(cpi);
@@ -675,53 +675,53 @@
aom_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
}
- vp10_setup_pc_tree(&cpi->common, &cpi->td);
+ av1_setup_pc_tree(&cpi->common, &cpi->td);
}
-void vp10_new_framerate(VP10_COMP *cpi, double framerate) {
+void av1_new_framerate(AV1_COMP *cpi, double framerate) {
cpi->framerate = framerate < 0.1 ? 30 : framerate;
- vp10_rc_update_framerate(cpi);
+ av1_rc_update_framerate(cpi);
}
-static void set_tile_limits(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_tile_limits(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int min_log2_tile_cols, max_log2_tile_cols;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
cm->log2_tile_cols =
clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
cm->log2_tile_rows = cpi->oxcf.tile_rows;
}
-static void update_frame_size(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void update_frame_size(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- vp10_set_mb_mi(cm, cm->width, cm->height);
- vp10_init_context_buffers(cm);
- vp10_init_macroblockd(cm, xd, NULL);
+ av1_set_mb_mi(cm, cm->width, cm->height);
+ av1_init_context_buffers(cm);
+ av1_init_macroblockd(cm, xd, NULL);
memset(cpi->mbmi_ext_base, 0,
cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
set_tile_limits(cpi);
}
-static void init_buffer_indices(VP10_COMP *cpi) {
+static void init_buffer_indices(AV1_COMP *cpi) {
cpi->lst_fb_idx = 0;
cpi->gld_fb_idx = 1;
cpi->alt_fb_idx = 2;
}
-static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
- VP10_COMMON *const cm = &cpi->common;
+static void init_config(struct AV1_COMP *cpi, AV1EncoderConfig *oxcf) {
+ AV1_COMMON *const cm = &cpi->common;
cpi->oxcf = *oxcf;
cpi->framerate = oxcf->init_framerate;
cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = oxcf->use_highbitdepth;
#endif
cm->color_space = oxcf->color_space;
@@ -729,13 +729,13 @@
cm->width = oxcf->width;
cm->height = oxcf->height;
- vp10_alloc_compressor_data(cpi);
+ av1_alloc_compressor_data(cpi);
// Single thread case: use counts in common.
cpi->td.counts = &cm->counts;
// change includes all joint functionality
- vp10_change_config(cpi, oxcf);
+ av1_change_config(cpi, oxcf);
cpi->static_mb_pct = 0;
cpi->ref_frame_flags = 0;
@@ -744,7 +744,7 @@
}
static void set_rc_buffer_sizes(RATE_CONTROL *rc,
- const VP10EncoderConfig *oxcf) {
+ const AV1EncoderConfig *oxcf) {
const int64_t bandwidth = oxcf->target_bandwidth;
const int64_t starting = oxcf->starting_buffer_level_ms;
const int64_t optimal = oxcf->optimal_buffer_level_ms;
@@ -757,7 +757,7 @@
(maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
cpi->fn_ptr[BT].sdf = SDF; \
cpi->fn_ptr[BT].sdaf = SDAF; \
@@ -924,8 +924,8 @@
MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad4x4x4d)
/* clang-format on */
-static void highbd_set_var_fns(VP10_COMP *const cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void highbd_set_var_fns(AV1_COMP *const cpi) {
+ AV1_COMMON *const cm = &cpi->common;
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
case VPX_BITS_8:
@@ -1188,10 +1188,10 @@
}
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static void realloc_segmentation_maps(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void realloc_segmentation_maps(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
// Create the encoder segmentation map and set all entries to 0
aom_free(cpi->segmentation_map);
@@ -1199,9 +1199,9 @@
aom_calloc(cm->mi_rows * cm->mi_cols, 1));
// Create a map used for cyclic background refresh.
- if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+ if (cpi->cyclic_refresh) av1_cyclic_refresh_free(cpi->cyclic_refresh);
CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
- vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
+ av1_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
// Create a map used to mark inactive areas.
aom_free(cpi->active_map.map);
@@ -1215,8 +1215,8 @@
aom_calloc(cm->mi_rows * cm->mi_cols, 1));
}
-void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
@@ -1230,9 +1230,9 @@
assert(cm->bit_depth > VPX_BITS_8);
cpi->oxcf = *oxcf;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
rc->baseline_gf_interval = FIXED_GF_INTERVAL;
@@ -1249,8 +1249,8 @@
: REFRESH_FRAME_CONTEXT_BACKWARD;
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
- vp10_reset_segment_features(cm);
- vp10_set_high_precision_mv(cpi, 0);
+ av1_reset_segment_features(cm);
+ av1_set_high_precision_mv(cpi, 0);
{
int i;
@@ -1268,7 +1268,7 @@
rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
// Set up frame rate and related parameters rate control values.
- vp10_new_framerate(cpi, cpi->framerate);
+ av1_new_framerate(cpi, cpi->framerate);
// Set absolute upper and lower quality limits
rc->worst_quality = cpi->oxcf.worst_allowed_q;
@@ -1288,8 +1288,8 @@
if (cpi->initial_width) {
if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) {
- vp10_free_context_buffers(cm);
- vp10_alloc_compressor_data(cpi);
+ av1_free_context_buffers(cm);
+ av1_alloc_compressor_data(cpi);
realloc_segmentation_maps(cpi);
cpi->initial_width = cpi->initial_height = 0;
}
@@ -1310,7 +1310,7 @@
cpi->ext_refresh_frame_flags_pending = 0;
cpi->ext_refresh_frame_context_pending = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
}
@@ -1357,26 +1357,26 @@
} while (++i <= MV_MAX);
}
-VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
+AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
BufferPool *const pool) {
unsigned int i;
- VP10_COMP *volatile const cpi = aom_memalign(32, sizeof(VP10_COMP));
- VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
+ AV1_COMP *volatile const cpi = aom_memalign(32, sizeof(AV1_COMP));
+ AV1_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
if (!cm) return NULL;
- vp10_zero(*cpi);
+ av1_zero(*cpi);
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
- vp10_remove_compressor(cpi);
+ av1_remove_compressor(cpi);
return 0;
}
cm->error.setjmp = 1;
- cm->alloc_mi = vp10_enc_alloc_mi;
- cm->free_mi = vp10_enc_free_mi;
- cm->setup_mi = vp10_enc_setup_mi;
+ cm->alloc_mi = av1_enc_alloc_mi;
+ cm->free_mi = av1_enc_free_mi;
+ cm->setup_mi = av1_enc_setup_mi;
CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(
@@ -1389,7 +1389,7 @@
cpi->common.buffer_pool = pool;
init_config(cpi, oxcf);
- vp10_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
+ av1_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
cm->current_video_frame = 0;
cpi->partition_search_skippable_frame = 0;
@@ -1511,7 +1511,7 @@
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
if (oxcf->pass == 1) {
- vp10_init_first_pass(cpi);
+ av1_init_first_pass(cpi);
} else if (oxcf->pass == 2) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
@@ -1533,11 +1533,11 @@
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
- vp10_init_second_pass(cpi);
+ av1_init_second_pass(cpi);
}
- vp10_set_speed_features_framesize_independent(cpi);
- vp10_set_speed_features_framesize_dependent(cpi);
+ av1_set_speed_features_framesize_independent(cpi);
+ av1_set_speed_features_framesize_dependent(cpi);
// Allocate memory to store variances for a frame.
CHECK_MEM_ERROR(cm, cpi->source_diff_var, aom_calloc(cm->MBs, sizeof(diff)));
@@ -1606,21 +1606,21 @@
aom_sub_pixel_variance4x4, aom_sub_pixel_avg_variance4x4, aom_sad4x4x3,
aom_sad4x4x8, aom_sad4x4x4d)
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
- /* vp10_init_quantizer() is first called here. Add check in
- * vp10_frame_init_quantizer() so that vp10_init_quantizer is only
+ /* av1_init_quantizer() is first called here. Add check in
+ * av1_frame_init_quantizer() so that av1_init_quantizer is only
* called later when needed. This will avoid unnecessary calls of
- * vp10_init_quantizer() for every frame.
+ * av1_init_quantizer() for every frame.
*/
- vp10_init_quantizer(cpi);
+ av1_init_quantizer(cpi);
#if CONFIG_AOM_QM
aom_qm_init(cm);
#endif
- vp10_loop_filter_init(cm);
+ av1_loop_filter_init(cm);
cm->error.setjmp = 0;
@@ -1631,8 +1631,8 @@
#define SNPRINT2(H, T, V) \
snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
-void vp10_remove_compressor(VP10_COMP *cpi) {
- VP10_COMMON *cm;
+void av1_remove_compressor(AV1_COMP *cpi) {
+ AV1_COMMON *cm;
unsigned int i;
int t;
@@ -1733,14 +1733,14 @@
// Deallocate allocated thread data.
if (t < cpi->num_workers - 1) {
aom_free(thread_data->td->counts);
- vp10_free_pc_tree(thread_data->td);
+ av1_free_pc_tree(thread_data->td);
aom_free(thread_data->td);
}
}
aom_free(cpi->tile_thr_data);
aom_free(cpi->workers);
- if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+ if (cpi->num_workers > 1) av1_loop_filter_dealloc(&cpi->lf_row_sync);
dealloc_compressor_data(cpi);
@@ -1756,8 +1756,8 @@
}
#endif
- vp10_remove_common(cm);
- vp10_free_ref_frame_buffers(cm->buffer_pool);
+ av1_remove_common(cm);
+ av1_free_ref_frame_buffers(cm->buffer_pool);
aom_free(cpi);
#ifdef OUTPUT_YUV_SKINMAP
@@ -1804,7 +1804,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w,
int h, uint64_t *sse, uint64_t *sum) {
@@ -1836,7 +1836,7 @@
*sse = (unsigned int)sse_long;
*sum = (int)sum_long;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, int width, int height) {
@@ -1878,7 +1878,7 @@
return total_sse;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int width,
int height, unsigned int input_shift) {
@@ -1931,7 +1931,7 @@
}
return total_sse;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
typedef struct {
double psnr[4]; // total/y/u/v
@@ -1939,7 +1939,7 @@
uint32_t samples[4]; // total/y/u/v
} PSNR_STATS;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
unsigned int bit_depth,
@@ -1987,7 +1987,7 @@
aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
-#else // !CONFIG_VPX_HIGHBITDEPTH
+#else // !CONFIG_AOM_HIGHBITDEPTH
static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
PSNR_STATS *psnr) {
@@ -2022,13 +2022,13 @@
psnr->psnr[0] =
aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static void generate_psnr_packet(VP10_COMP *cpi) {
+static void generate_psnr_packet(AV1_COMP *cpi) {
struct aom_codec_cx_pkt pkt;
int i;
PSNR_STATS psnr;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
#else
@@ -2044,22 +2044,22 @@
aom_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags) {
if (ref_frame_flags > 7) return -1;
cpi->ref_frame_flags = ref_frame_flags;
return 0;
}
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) {
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags) {
cpi->ext_refresh_golden_frame = (ref_frame_flags & VPX_GOLD_FLAG) != 0;
cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VPX_ALT_FLAG) != 0;
cpi->ext_refresh_last_frame = (ref_frame_flags & VPX_LAST_FLAG) != 0;
cpi->ext_refresh_frame_flags_pending = 1;
}
-static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
- VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_av1_ref_frame_buffer(
+ AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
MV_REFERENCE_FRAME ref_frame = NONE;
if (ref_frame_flag == VPX_LAST_FLAG)
ref_frame = LAST_FRAME;
@@ -2071,9 +2071,9 @@
return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
}
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
- YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+ YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
if (cfg) {
aom_yv12_copy_frame(cfg, sd);
return 0;
@@ -2082,9 +2082,9 @@
}
}
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_set_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
- YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+ YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
if (cfg) {
aom_yv12_copy_frame(sd, cfg);
return 0;
@@ -2093,7 +2093,7 @@
}
}
-int vp10_update_entropy(VP10_COMP *cpi, int update) {
+int av1_update_entropy(AV1_COMP *cpi, int update) {
cpi->ext_refresh_frame_context = update;
cpi->ext_refresh_frame_context_pending = 1;
return 0;
@@ -2104,7 +2104,7 @@
// as YUV 420. We simply use the top-left pixels of the UV buffers, since we do
// not denoise the UV channels at this time. If ever we implement UV channel
// denoising we will have to modify this.
-void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
+void av1_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
uint8_t *src = s->y_buffer;
int h = s->y_height;
@@ -2132,12 +2132,12 @@
#endif
#ifdef OUTPUT_YUV_REC
-void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
+void av1_write_yuv_rec_frame(AV1_COMMON *cm) {
YV12_BUFFER_CONFIG *s = cm->frame_to_show;
uint8_t *src = s->y_buffer;
int h = cm->height;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
@@ -2165,7 +2165,7 @@
fflush(yuv_rec_file);
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
do {
fwrite(src, s->y_width, 1, yuv_rec_file);
@@ -2192,14 +2192,14 @@
}
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
int bd) {
#else
static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst) {
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t
int i;
const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
@@ -2217,30 +2217,30 @@
dst->uv_crop_height };
for (i = 0; i < MAX_MB_PLANE; ++i) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
+ av1_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
src_strides[i], dsts[i], dst_heights[i],
dst_widths[i], dst_strides[i], bd);
} else {
- vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+ av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
}
#else
- vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+ av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
aom_extend_frame_borders(dst);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int bd) {
#else
static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst) {
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
const int src_w = src->y_crop_width;
const int src_h = src->y_crop_height;
const int dst_w = dst->y_crop_width;
@@ -2250,7 +2250,7 @@
const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
- const InterpKernel *const kernel = vp10_filter_kernels[EIGHTTAP];
+ const InterpKernel *const kernel = av1_filter_kernels[EIGHTTAP];
int x, y, i;
for (y = 0; y < dst_h; y += 16) {
@@ -2266,7 +2266,7 @@
(x / factor) * src_w / dst_w;
uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
aom_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
kernel[x_q4 & 0xf], 16 * src_w / dst_w,
@@ -2283,7 +2283,7 @@
kernel[x_q4 & 0xf], 16 * src_w / dst_w,
kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
16 / factor);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -2291,7 +2291,7 @@
aom_extend_frame_borders(dst);
}
-static int scale_down(VP10_COMP *cpi, int q) {
+static int scale_down(AV1_COMP *cpi, int q) {
RATE_CONTROL *const rc = &cpi->rc;
GF_GROUP *const gf_group = &cpi->twopass.gf_group;
int scale = 0;
@@ -2309,10 +2309,10 @@
// Function to test for conditions that indicate we should loop
// back and recode a frame.
-static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
+static int recode_loop_test(AV1_COMP *cpi, int high_limit, int low_limit,
int q, int maxq, int minq) {
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
int force_recode = 0;
@@ -2342,8 +2342,8 @@
return force_recode;
}
-void vp10_update_reference_frames(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_update_reference_frames(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
// At this point the new frame has been encoded.
@@ -2353,10 +2353,10 @@
cm->new_fb_idx);
ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
cm->new_fb_idx);
- } else if (vp10_preserve_existing_gf(cpi)) {
+ } else if (av1_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term in function
- // vp10_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
+ // av1_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
// we're updating the GF with the current decoded frame, we save it to the
// ARF slot instead.
// We now have to update the ARF with the current frame and swap gld_fb_idx
@@ -2408,7 +2408,7 @@
}
}
-static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
+static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) {
MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
struct loopfilter *lf = &cm->lf;
if (is_lossless_requested(&cpi->oxcf)) {
@@ -2420,7 +2420,7 @@
aom_usec_timer_start(&timer);
- vp10_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
+ av1_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
aom_usec_timer_mark(&timer);
cpi->time_pick_lpf += aom_usec_timer_elapsed(&timer);
@@ -2428,20 +2428,20 @@
if (lf->filter_level > 0) {
if (cpi->num_workers > 1)
- vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
+ av1_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
lf->filter_level, 0, 0, cpi->workers,
cpi->num_workers, &cpi->lf_row_sync);
else
- vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+ av1_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
}
#if CONFIG_DERING
if (is_lossless_requested(&cpi->oxcf)) {
cm->dering_level = 0;
} else {
- cm->dering_level = vp10_dering_search(cm->frame_to_show, cpi->Source, cm,
+ cm->dering_level = av1_dering_search(cm->frame_to_show, cpi->Source, cm,
xd);
- vp10_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
+ av1_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
}
#endif // CONFIG_DERING
@@ -2454,7 +2454,7 @@
// TODO(yaowu): investigate per-segment CLPF decision and
// an optimal threshold, use 80 for now.
for (i = 0; i < MAX_SEGMENTS; i++)
- hq &= vp10_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
+ hq &= av1_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
if (!hq) { // Don't try filter if the entire image is nearly losslessly
// encoded
@@ -2470,7 +2470,7 @@
get_sse(cpi->Source->v_buffer, cpi->Source->uv_stride,
cm->frame_to_show->v_buffer, cm->frame_to_show->uv_stride,
cpi->Source->uv_crop_width, cpi->Source->uv_crop_height);
- vp10_clpf_frame(cm->frame_to_show, cm, xd);
+ av1_clpf_frame(cm->frame_to_show, cm, xd);
after = get_sse(cpi->Source->y_buffer, cpi->Source->y_stride,
cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
cpi->Source->y_crop_width, cpi->Source->y_crop_height) +
@@ -2485,7 +2485,7 @@
before = get_sse(cpi->Source->y_buffer, cpi->Source->y_stride,
cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
cpi->Source->y_crop_width, cpi->Source->y_crop_height);
- vp10_clpf_frame(cm->frame_to_show, cm, xd);
+ av1_clpf_frame(cm->frame_to_show, cm, xd);
after = get_sse(cpi->Source->y_buffer, cpi->Source->y_stride,
cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
cpi->Source->y_crop_width, cpi->Source->y_crop_height);
@@ -2507,7 +2507,7 @@
aom_extend_frame_inner_borders(cm->frame_to_show);
}
-static INLINE void alloc_frame_mvs(const VP10_COMMON *cm, int buffer_idx) {
+static INLINE void alloc_frame_mvs(const AV1_COMMON *cm, int buffer_idx) {
RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
new_fb_ptr->mi_cols < cm->mi_cols) {
@@ -2519,8 +2519,8 @@
}
}
-void vp10_scale_references(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+void av1_scale_references(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
MV_REFERENCE_FRAME ref_frame;
const VPX_REFFRAME ref_mask[3] = { VPX_LAST_FLAG, VPX_GOLD_FLAG,
VPX_ALT_FLAG };
@@ -2537,7 +2537,7 @@
continue;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
RefCntBuffer *new_fb_ptr = NULL;
int force_scaling = 0;
@@ -2579,7 +2579,7 @@
cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
alloc_frame_mvs(cm, new_fb);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} else {
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
RefCntBuffer *const buf = &pool->frame_bufs[buf_idx];
@@ -2594,8 +2594,8 @@
}
}
-static void release_scaled_references(VP10_COMP *cpi) {
- VP10_COMMON *cm = &cpi->common;
+static void release_scaled_references(AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
int i;
if (cpi->oxcf.pass == 0) {
// Only release scaled references under certain conditions:
@@ -2640,8 +2640,8 @@
model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
}
-static void full_to_model_counts(vp10_coeff_count_model *model_count,
- vp10_coeff_count *full_count) {
+static void full_to_model_counts(av1_coeff_count_model *model_count,
+ av1_coeff_count *full_count) {
int i, j, k, l;
for (i = 0; i < PLANE_TYPES; ++i)
@@ -2652,14 +2652,14 @@
}
#if 0 && CONFIG_INTERNAL_STATS
-static void output_frame_level_debug_stats(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void output_frame_level_debug_stats(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
int64_t recon_err;
aom_clear_system_state();
- recon_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ recon_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
if (cpi->twopass.total_left_stats.coded_error != 0.0)
fprintf(f, "%10u %dx%d %10d %10d %d %d %10d %10d %10d %10d"
@@ -2686,12 +2686,12 @@
cpi->rc.total_target_vs_actual,
(cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
cpi->rc.total_actual_bits, cm->base_qindex,
- vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
- (double)vp10_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
- vp10_convert_qindex_to_q(cpi->twopass.active_worst_quality,
+ av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
+ (double)av1_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
+ av1_convert_qindex_to_q(cpi->twopass.active_worst_quality,
cm->bit_depth),
cpi->rc.avg_q,
- vp10_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
+ av1_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
cpi->refresh_last_frame, cpi->refresh_golden_frame,
cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
cpi->twopass.bits_left,
@@ -2722,12 +2722,12 @@
}
#endif
-static void set_mv_search_params(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+static void set_mv_search_params(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
// Default based on max resolution.
- cpi->mv_step_param = vp10_init_search_range(max_mv_def);
+ cpi->mv_step_param = av1_init_search_range(max_mv_def);
if (cpi->sf.mv.auto_mv_step_size) {
if (frame_is_intra_only(cm)) {
@@ -2739,7 +2739,7 @@
// Allow mv_steps to correspond to twice the max mv magnitude found
// in the previous frame, capped by the default max_mv_magnitude based
// on resolution.
- cpi->mv_step_param = vp10_init_search_range(
+ cpi->mv_step_param = av1_init_search_range(
VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
}
cpi->max_mv_magnitude = 0;
@@ -2747,26 +2747,26 @@
}
}
-static void set_size_independent_vars(VP10_COMP *cpi) {
- vp10_set_speed_features_framesize_independent(cpi);
- vp10_set_rd_speed_thresholds(cpi);
- vp10_set_rd_speed_thresholds_sub8x8(cpi);
+static void set_size_independent_vars(AV1_COMP *cpi) {
+ av1_set_speed_features_framesize_independent(cpi);
+ av1_set_rd_speed_thresholds(cpi);
+ av1_set_rd_speed_thresholds_sub8x8(cpi);
cpi->common.interp_filter = cpi->sf.default_interp_filter;
}
-static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+static void set_size_dependent_vars(AV1_COMP *cpi, int *q, int *bottom_index,
int *top_index) {
- VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
// Setup variables that depend on the dimensions of the frame.
- vp10_set_speed_features_framesize_dependent(cpi);
+ av1_set_speed_features_framesize_dependent(cpi);
// Decide q and q bounds.
- *q = vp10_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+ *q = av1_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
if (!frame_is_intra_only(cm)) {
- vp10_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
+ av1_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
}
// Configure experimental use of segmentation for enhanced coding of
@@ -2777,30 +2777,30 @@
configure_static_seg_features(cpi);
}
-static void init_motion_estimation(VP10_COMP *cpi) {
+static void init_motion_estimation(AV1_COMP *cpi) {
int y_stride = cpi->scaled_source.y_stride;
if (cpi->sf.mv.search_method == NSTEP) {
- vp10_init3smotion_compensation(&cpi->ss_cfg, y_stride);
+ av1_init3smotion_compensation(&cpi->ss_cfg, y_stride);
} else if (cpi->sf.mv.search_method == DIAMOND) {
- vp10_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
+ av1_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
}
}
-static void set_frame_size(VP10_COMP *cpi) {
+static void set_frame_size(AV1_COMP *cpi) {
int ref_frame;
- VP10_COMMON *const cm = &cpi->common;
- VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
+ AV1EncoderConfig *const oxcf = &cpi->oxcf;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
(oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
- vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+ av1_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
&oxcf->scaled_frame_height);
// There has been a change in frame size.
- vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+ av1_set_size_literal(cpi, oxcf->scaled_frame_width,
oxcf->scaled_frame_height);
}
@@ -2818,7 +2818,7 @@
}
if (cpi->resize_pending != 0) {
// There has been a change in frame size.
- vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+ av1_set_size_literal(cpi, oxcf->scaled_frame_width,
oxcf->scaled_frame_height);
// TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
@@ -2827,7 +2827,7 @@
}
if (oxcf->pass == 2) {
- vp10_set_target_rate(cpi);
+ av1_set_target_rate(cpi);
}
alloc_frame_mvs(cm, cm->new_fb_idx);
@@ -2835,7 +2835,7 @@
// Reset the frame pointers to the current frame size.
aom_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL,
@@ -2853,16 +2853,16 @@
if (buf_idx != INVALID_IDX) {
YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
ref_buf->buf = buf;
-#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
#else
- vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+ av1_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
buf->y_crop_height, cm->width,
cm->height);
-#endif // CONFIG_VPX_HIGHBITDEPTH
- if (vp10_is_scaled(&ref_buf->sf)) aom_extend_frame_borders(buf);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ if (av1_is_scaled(&ref_buf->sf)) aom_extend_frame_borders(buf);
} else {
ref_buf->buf = NULL;
}
@@ -2871,8 +2871,8 @@
set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
}
-static void encode_without_recode_loop(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void encode_without_recode_loop(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int q = 0, bottom_index = 0, top_index = 0; // Dummy variables.
aom_clear_system_state();
@@ -2885,28 +2885,28 @@
cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
cpi->un_scaled_source->y_width == (cm->width << 1) &&
cpi->un_scaled_source->y_height == (cm->height << 1)) {
- cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
+ cpi->Source = av1_scale_if_required_fast(cm, cpi->un_scaled_source,
&cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required_fast(
+ cpi->Last_Source = av1_scale_if_required_fast(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
} else {
cpi->Source =
- vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+ av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
+ cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
&cpi->scaled_last_source);
}
if (frame_is_intra_only(cm) == 0) {
- vp10_scale_references(cpi);
+ av1_scale_references(cpi);
}
set_size_independent_vars(cpi);
set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
- vp10_set_quantizer(cm, q);
- vp10_set_variance_partition_thresholds(cpi, q);
+ av1_set_quantizer(cm, q);
+ av1_set_variance_partition_thresholds(cpi, q);
setup_frame(cpi);
@@ -2914,22 +2914,22 @@
// Variance adaptive and in frame q adjustment experiments are mutually
// exclusive.
if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
- vp10_vaq_frame_setup(cpi);
+ av1_vaq_frame_setup(cpi);
} else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
- vp10_setup_in_frame_q_adj(cpi);
+ av1_setup_in_frame_q_adj(cpi);
} else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp10_cyclic_refresh_setup(cpi);
+ av1_cyclic_refresh_setup(cpi);
}
apply_active_map(cpi);
// transform / motion compensation build reconstruction frame
- vp10_encode_frame(cpi);
+ av1_encode_frame(cpi);
// Update some stats from cyclic refresh, and check if we should not update
// golden reference, for 1 pass CBR.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
(cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
- vp10_cyclic_refresh_check_golden_update(cpi);
+ av1_cyclic_refresh_check_golden_update(cpi);
// Update the skip mb flag probabilities based on the distribution
// seen in the last encoder iteration.
@@ -2937,9 +2937,9 @@
aom_clear_system_state();
}
-static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
+static void encode_with_recode_loop(AV1_COMP *cpi, size_t *size,
uint8_t *dest) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int bottom_index, top_index;
int loop_count = 0;
@@ -2979,39 +2979,39 @@
// Decide frame size bounds first time through.
if (loop_count == 0) {
- vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
+ av1_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
&frame_under_shoot_limit,
&frame_over_shoot_limit);
}
cpi->Source =
- vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+ av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
+ cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
&cpi->scaled_last_source);
if (frame_is_intra_only(cm) == 0) {
if (loop_count > 0) {
release_scaled_references(cpi);
}
- vp10_scale_references(cpi);
+ av1_scale_references(cpi);
}
- vp10_set_quantizer(cm, q);
+ av1_set_quantizer(cm, q);
if (loop_count == 0) setup_frame(cpi);
// Variance adaptive and in frame q adjustment experiments are mutually
// exclusive.
if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
- vp10_vaq_frame_setup(cpi);
+ av1_vaq_frame_setup(cpi);
} else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
- vp10_setup_in_frame_q_adj(cpi);
+ av1_setup_in_frame_q_adj(cpi);
}
// transform / motion compensation build reconstruction frame
- vp10_encode_frame(cpi);
+ av1_encode_frame(cpi);
// Update the skip mb flag probabilities based on the distribution
// seen in the last encoder iteration.
@@ -3024,7 +3024,7 @@
// to recode.
if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
save_coding_context(cpi);
- vp10_pack_bitstream(cpi, dest, size);
+ av1_pack_bitstream(cpi, dest, size);
rc->projected_frame_size = (int)(*size) << 3;
restore_coding_context(cpi);
@@ -3043,15 +3043,15 @@
int64_t high_err_target = cpi->ambient_err;
int64_t low_err_target = cpi->ambient_err >> 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- kf_err = vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ kf_err = av1_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
- kf_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ kf_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
#else
- kf_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ kf_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Prevent possible divide by zero error below for perfect KF
kf_err += !kf_err;
@@ -3119,19 +3119,19 @@
if (undershoot_seen || loop_at_this_size > 1) {
// Update rate_correction_factor unless
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low + 1) / 2;
} else {
// Update rate_correction_factor unless
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
VPXMAX(q_high, top_index));
while (q < q_low && retries < 10) {
- vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ av1_rc_update_rate_correction_factors(cpi);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
VPXMAX(q_high, top_index));
retries++;
}
@@ -3143,11 +3143,11 @@
q_high = q > q_low ? q - 1 : q_low;
if (overshoot_seen || loop_at_this_size > 1) {
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low) / 2;
} else {
- vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ av1_rc_update_rate_correction_factors(cpi);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
top_index);
// Special case reset for qlow for constrained quality.
// This should only trigger where there is very substantial
@@ -3158,8 +3158,8 @@
}
while (q > q_high && retries < 10) {
- vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ av1_rc_update_rate_correction_factors(cpi);
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
top_index);
retries++;
}
@@ -3193,7 +3193,7 @@
} while (loop);
}
-static int get_ref_frame_flags(const VP10_COMP *cpi) {
+static int get_ref_frame_flags(const AV1_COMP *cpi) {
const int *const map = cpi->common.ref_frame_map;
const int gold_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx];
const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
@@ -3211,9 +3211,9 @@
return flags;
}
-static void set_ext_overrides(VP10_COMP *cpi) {
+static void set_ext_overrides(AV1_COMP *cpi) {
// Overrides the defaults with the externally supplied values with
- // vp10_update_reference() and vp10_update_entropy() calls
+ // av1_update_reference() and av1_update_entropy() calls
// Note: The overrides are valid only for the next frame passed
// to encode_frame_to_data_rate() function
if (cpi->ext_refresh_frame_context_pending) {
@@ -3228,7 +3228,7 @@
}
}
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
YV12_BUFFER_CONFIG *unscaled,
YV12_BUFFER_CONFIG *scaled) {
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
@@ -3242,24 +3242,24 @@
}
}
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
YV12_BUFFER_CONFIG *unscaled,
YV12_BUFFER_CONFIG *scaled) {
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
cm->mi_rows * MI_SIZE != unscaled->y_height) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
#else
scale_and_extend_frame_nonnormative(unscaled, scaled);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return scaled;
} else {
return unscaled;
}
}
-static void set_arf_sign_bias(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_arf_sign_bias(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int arf_sign_bias;
if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
@@ -3274,7 +3274,7 @@
cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
}
-static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
+static int setup_interp_filter_search_mask(AV1_COMP *cpi) {
INTERP_FILTER ifilter;
int ref_total[MAX_REF_FRAMES] = { 0 };
MV_REFERENCE_FRAME ref;
@@ -3299,11 +3299,11 @@
return mask;
}
-static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
+static void encode_frame_to_data_rate(AV1_COMP *cpi, size_t *size,
uint8_t *dest,
unsigned int *frame_flags) {
- VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
struct segmentation *const seg = &cm->seg;
TX_SIZE t;
@@ -3322,7 +3322,7 @@
// Set various flags etc to special state if it is a key frame.
if (frame_is_intra_only(cm)) {
// Reset the loop filter deltas and segmentation map.
- vp10_reset_segment_features(cm);
+ av1_reset_segment_features(cm);
// If segmentation is enabled force a map update for key frames.
if (seg->enabled) {
@@ -3349,8 +3349,8 @@
// Never drop on key frame.
if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
cm->frame_type != KEY_FRAME) {
- if (vp10_rc_drop_frame(cpi)) {
- vp10_rc_postencode_update_drop_frame(cpi);
+ if (av1_rc_drop_frame(cpi)) {
+ av1_rc_postencode_update_drop_frame(cpi);
++cm->current_video_frame;
return;
}
@@ -3371,7 +3371,7 @@
#ifdef OUTPUT_YUV_SKINMAP
if (cpi->common.current_video_frame > 1) {
- vp10_compute_skin_map(cpi, yuv_skinmap_file);
+ av1_compute_skin_map(cpi, yuv_skinmap_file);
}
#endif
@@ -3379,16 +3379,16 @@
// fixed interval. Note the reconstruction error if it is the frame before
// the force key frame
if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
cpi->ambient_err =
- vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ av1_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
- cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+ cpi->ambient_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
#else
- cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ cpi->ambient_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
// If the encoder forced a KEY_FRAME decision
@@ -3404,32 +3404,32 @@
loopfilter_frame(cpi, cm);
// build the bitstream
- vp10_pack_bitstream(cpi, dest, size);
+ av1_pack_bitstream(cpi, dest, size);
if (cm->seg.update_map) update_reference_segmentation_map(cpi);
if (frame_is_intra_only(cm) == 0) {
release_scaled_references(cpi);
}
- vp10_update_reference_frames(cpi);
+ av1_update_reference_frames(cpi);
for (t = TX_4X4; t <= TX_32X32; t++)
full_to_model_counts(cpi->td.counts->coef[t],
cpi->td.rd_counts.coef_counts[t]);
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_adapt_coef_probs(cm);
+ av1_adapt_coef_probs(cm);
#if CONFIG_MISC_FIXES
- vp10_adapt_intra_frame_probs(cm);
+ av1_adapt_intra_frame_probs(cm);
#else
- if (!frame_is_intra_only(cm)) vp10_adapt_intra_frame_probs(cm);
+ if (!frame_is_intra_only(cm)) av1_adapt_intra_frame_probs(cm);
#endif
}
if (!frame_is_intra_only(cm)) {
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_adapt_inter_frame_probs(cm);
- vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+ av1_adapt_inter_frame_probs(cm);
+ av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
}
}
@@ -3447,7 +3447,7 @@
cm->last_frame_type = cm->frame_type;
- vp10_rc_postencode_update(cpi, *size);
+ av1_rc_postencode_update(cpi, *size);
#if 0
output_frame_level_debug_stats(cpi);
@@ -3474,7 +3474,7 @@
if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
if (cm->show_frame) {
- vp10_swap_mi_and_prev_mi(cm);
+ av1_swap_mi_and_prev_mi(cm);
// Don't increment frame counters if this was an altref buffer
// update not a real frame
++cm->current_video_frame;
@@ -3482,25 +3482,25 @@
cm->prev_frame = cm->cur_frame;
}
-static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass0Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
unsigned int *frame_flags) {
if (cpi->oxcf.rc_mode == VPX_CBR) {
- vp10_rc_get_one_pass_cbr_params(cpi);
+ av1_rc_get_one_pass_cbr_params(cpi);
} else {
- vp10_rc_get_one_pass_vbr_params(cpi);
+ av1_rc_get_one_pass_vbr_params(cpi);
}
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
}
-static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass2Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
unsigned int *frame_flags) {
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
- vp10_twopass_postencode_update(cpi);
+ av1_twopass_postencode_update(cpi);
}
-static void init_ref_frame_bufs(VP10_COMMON *cm) {
+static void init_ref_frame_bufs(AV1_COMMON *cm) {
int i;
BufferPool *const pool = cm->buffer_pool;
cm->new_fb_idx = INVALID_IDX;
@@ -3510,22 +3510,22 @@
}
}
-static void check_initial_width(VP10_COMP *cpi,
-#if CONFIG_VPX_HIGHBITDEPTH
+static void check_initial_width(AV1_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
int subsampling_x, int subsampling_y) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
if (!cpi->initial_width ||
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth != use_highbitdepth ||
#endif
cm->subsampling_x != subsampling_x ||
cm->subsampling_y != subsampling_y) {
cm->subsampling_x = subsampling_x;
cm->subsampling_y = subsampling_y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = use_highbitdepth;
#endif
@@ -3541,27 +3541,27 @@
}
}
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time) {
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
struct aom_usec_timer timer;
int res = 0;
const int subsampling_x = sd->subsampling_x;
const int subsampling_y = sd->subsampling_y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int use_highbitdepth = sd->flags & YV12_FLAG_HIGHBITDEPTH;
check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
#else
check_initial_width(cpi, subsampling_x, subsampling_y);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
aom_usec_timer_start(&timer);
- if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
-#if CONFIG_VPX_HIGHBITDEPTH
+ if (av1_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+#if CONFIG_AOM_HIGHBITDEPTH
use_highbitdepth,
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
frame_flags))
res = -1;
aom_usec_timer_mark(&timer);
@@ -3583,8 +3583,8 @@
return res;
}
-static int frame_is_reference(const VP10_COMP *cpi) {
- const VP10_COMMON *cm = &cpi->common;
+static int frame_is_reference(const AV1_COMP *cpi) {
+ const AV1_COMMON *cm = &cpi->common;
return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame ||
@@ -3593,7 +3593,7 @@
cm->seg.update_data;
}
-static void adjust_frame_rate(VP10_COMP *cpi,
+static void adjust_frame_rate(AV1_COMP *cpi,
const struct lookahead_entry *source) {
int64_t this_duration;
int step = 0;
@@ -3614,7 +3614,7 @@
if (this_duration) {
if (step) {
- vp10_new_framerate(cpi, 10000000.0 / this_duration);
+ av1_new_framerate(cpi, 10000000.0 / this_duration);
} else {
// Average this frame's rate into the last second's average
// frame rate. If we haven't seen 1 second yet, then average
@@ -3625,7 +3625,7 @@
avg_duration *= (interval - avg_duration + this_duration);
avg_duration /= interval;
- vp10_new_framerate(cpi, 10000000.0 / avg_duration);
+ av1_new_framerate(cpi, 10000000.0 / avg_duration);
}
}
cpi->last_time_stamp_seen = source->ts_start;
@@ -3634,7 +3634,7 @@
// Returns 0 if this is not an alt ref else the offset of the source frame
// used as the arf midpoint.
-static int get_arf_src_index(VP10_COMP *cpi) {
+static int get_arf_src_index(AV1_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
int arf_src_index = 0;
if (is_altref_enabled(cpi)) {
@@ -3650,7 +3650,7 @@
return arf_src_index;
}
-static void check_src_altref(VP10_COMP *cpi,
+static void check_src_altref(AV1_COMP *cpi,
const struct lookahead_entry *source) {
RATE_CONTROL *const rc = &cpi->rc;
@@ -3674,7 +3674,7 @@
}
#if CONFIG_INTERNAL_STATS
-extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
+extern double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
const unsigned char *img2, int img2_pitch,
int width, int height);
@@ -3688,11 +3688,11 @@
}
#endif // CONFIG_INTERNAL_STATS
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
size_t *size, uint8_t *dest, int64_t *time_stamp,
int64_t *time_end, int flush) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
- VP10_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
struct aom_usec_timer cmptimer;
@@ -3704,7 +3704,7 @@
aom_usec_timer_start(&cmptimer);
- vp10_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+ av1_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
// Is multi-arf enabled.
// Note that at the moment multi_arf is only configured for 2 pass VBR
@@ -3731,12 +3731,12 @@
if (arf_src_index) {
assert(arf_src_index <= rc->frames_to_key);
- if ((source = vp10_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
+ if ((source = av1_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
cpi->alt_ref_source = source;
if (oxcf->arnr_max_frames > 0) {
// Produce the filtered ARF frame.
- vp10_temporal_filter(cpi, arf_src_index);
+ av1_temporal_filter(cpi, arf_src_index);
aom_extend_frame_borders(&cpi->alt_ref_buffer);
force_src_buffer = &cpi->alt_ref_buffer;
}
@@ -3756,12 +3756,12 @@
if (!source) {
// Get last frame source.
if (cm->current_video_frame > 0) {
- if ((last_source = vp10_lookahead_peek(cpi->lookahead, -1)) == NULL)
+ if ((last_source = av1_lookahead_peek(cpi->lookahead, -1)) == NULL)
return -1;
}
// Read in the source frame.
- source = vp10_lookahead_pop(cpi->lookahead, flush);
+ source = av1_lookahead_pop(cpi->lookahead, flush);
if (source != NULL) {
cm->show_frame = 1;
@@ -3785,7 +3785,7 @@
} else {
*size = 0;
if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
- vp10_end_first_pass(cpi); /* get last stats packet */
+ av1_end_first_pass(cpi); /* get last stats packet */
cpi->twopass.first_pass_done = 1;
}
return -1;
@@ -3830,7 +3830,7 @@
cpi->frame_flags = *frame_flags;
if (oxcf->pass == 2) {
- vp10_rc_get_second_pass_params(cpi);
+ av1_rc_get_second_pass_params(cpi);
} else if (oxcf->pass == 1) {
set_frame_size(cpi);
}
@@ -3847,7 +3847,7 @@
if (oxcf->pass == 1) {
cpi->td.mb.e_mbd.lossless[0] = is_lossless_requested(oxcf);
- vp10_first_pass(cpi, source);
+ av1_first_pass(cpi, source);
} else if (oxcf->pass == 2) {
Pass2Encode(cpi, size, dest, frame_flags);
} else {
@@ -3886,12 +3886,12 @@
YV12_BUFFER_CONFIG *orig = cpi->Source;
YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
PSNR_STATS psnr;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
calc_highbd_psnr(orig, recon, &psnr, cpi->td.mb.e_mbd.bd,
cpi->oxcf.input_bit_depth);
#else
calc_psnr(orig, recon, &psnr);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3],
psnr.psnr[0], &cpi->psnr);
@@ -3903,7 +3903,7 @@
double frame_ssim2 = 0, weight = 0;
aom_clear_system_state();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
frame_ssim2 =
aom_highbd_calc_ssim(orig, recon, &weight, (int)cm->bit_depth);
@@ -3912,7 +3912,7 @@
}
#else
frame_ssim2 = aom_calc_ssim(orig, recon, &weight);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
cpi->summed_quality += frame_ssim2 * weight;
@@ -3932,11 +3932,11 @@
}
}
if (cpi->b_calculate_blockiness) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (!cm->use_highbitdepth)
#endif
{
- double frame_blockiness = vp10_get_blockiness(
+ double frame_blockiness = av1_get_blockiness(
cpi->Source->y_buffer, cpi->Source->y_stride,
cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
cpi->Source->y_width, cpi->Source->y_height);
@@ -3947,7 +3947,7 @@
}
if (cpi->b_calculate_consistency) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (!cm->use_highbitdepth)
#endif
{
@@ -3969,7 +3969,7 @@
if (cpi->b_calculate_ssimg) {
double y, u, v, frame_all;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
frame_all = aom_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
&u, &v, (int)cm->bit_depth);
@@ -3979,10 +3979,10 @@
}
#else
frame_all = aom_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
adjust_image_stat(y, u, v, frame_all, &cpi->ssimg);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (!cm->use_highbitdepth)
#endif
{
@@ -3992,7 +3992,7 @@
adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
/* TODO(JBB): add 10/12 bit support */
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (!cm->use_highbitdepth)
#endif
{
@@ -4008,8 +4008,8 @@
return 0;
}
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
- VP10_COMMON *cm = &cpi->common;
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
+ AV1_COMMON *cm = &cpi->common;
if (!cm->show_frame) {
return -1;
@@ -4030,9 +4030,9 @@
}
}
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+int av1_set_internal_size(AV1_COMP *cpi, VPX_SCALING horiz_mode,
VPX_SCALING vert_mode) {
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
int hr = 0, hs = 0, vr = 0, vs = 0;
if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
@@ -4051,14 +4051,14 @@
return 0;
}
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
unsigned int height) {
- VP10_COMMON *cm = &cpi->common;
-#if CONFIG_VPX_HIGHBITDEPTH
+ AV1_COMMON *cm = &cpi->common;
+#if CONFIG_AOM_HIGHBITDEPTH
check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
#else
check_initial_width(cpi, 1, 1);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (width) {
cm->width = width;
@@ -4083,7 +4083,7 @@
return 0;
}
-int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
+int64_t av1_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
@@ -4092,8 +4092,8 @@
a->y_crop_width, a->y_crop_height);
}
-#if CONFIG_VPX_HIGHBITDEPTH
-int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
@@ -4103,11 +4103,11 @@
return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
a->y_crop_width, a->y_crop_height);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
+int av1_get_quantizer(AV1_COMP *cpi) { return cpi->common.base_qindex; }
-void vp10_apply_encoding_flags(VP10_COMP *cpi, aom_enc_frame_flags_t flags) {
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags) {
if (flags &
(VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
int ref = 7;
@@ -4118,7 +4118,7 @@
if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
- vp10_use_as_reference(cpi, ref);
+ av1_use_as_reference(cpi, ref);
}
if (flags &
@@ -4132,10 +4132,10 @@
if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
- vp10_update_reference(cpi, upd);
+ av1_update_reference(cpi, upd);
}
if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
- vp10_update_entropy(cpi, 0);
+ av1_update_entropy(cpi, 0);
}
}
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 3af900d..81ef833 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_ENCODER_H_
-#define VP10_ENCODER_ENCODER_H_
+#ifndef AV1_ENCODER_ENCODER_H_
+#define AV1_ENCODER_ENCODER_H_
#include <stdio.h>
@@ -117,7 +117,7 @@
RESIZE_DYNAMIC = 2 // Coded size of each frame is determined by the codec.
} RESIZE_TYPE;
-typedef struct VP10EncoderConfig {
+typedef struct AV1EncoderConfig {
BITSTREAM_PROFILE profile;
aom_bit_depth_t bit_depth; // Codec bit-depth.
int width; // width of data passed to the compressor
@@ -226,16 +226,16 @@
aom_tune_metric tuning;
aom_tune_content content;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth;
#endif
aom_color_space_t color_space;
int color_range;
int render_width;
int render_height;
-} VP10EncoderConfig;
+} AV1EncoderConfig;
-static INLINE int is_lossless_requested(const VP10EncoderConfig *cfg) {
+static INLINE int is_lossless_requested(const AV1EncoderConfig *cfg) {
return cfg->best_allowed_q == 0 && cfg->worst_allowed_q == 0;
}
@@ -247,7 +247,7 @@
} TileDataEnc;
typedef struct RD_COUNTS {
- vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
+ av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
int64_t comp_pred_diff[REFERENCE_MODES];
int64_t filter_diff[SWITCHABLE_FILTER_CONTEXTS];
int m_search_count;
@@ -279,14 +279,14 @@
double worst;
} ImageStat;
-typedef struct VP10_COMP {
+typedef struct AV1_COMP {
QUANTS quants;
ThreadData td;
MB_MODE_INFO_EXT *mbmi_ext_base;
DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
- VP10_COMMON common;
- VP10EncoderConfig oxcf;
+ AV1_COMMON common;
+ AV1EncoderConfig oxcf;
struct lookahead_ctx *lookahead;
struct lookahead_entry *alt_ref_source;
@@ -376,8 +376,8 @@
ActiveMap active_map;
fractional_mv_step_fp *find_fractional_mv_step;
- vp10_full_search_fn_t full_search_sad;
- vp10_diamond_search_fn_t diamond_search_sad;
+ av1_full_search_fn_t full_search_sad;
+ av1_diamond_search_fn_t diamond_search_sad;
aom_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
uint64_t time_receive_data;
uint64_t time_compress_data;
@@ -484,59 +484,59 @@
int num_workers;
VPxWorker *workers;
struct EncWorkerData *tile_thr_data;
- VP10LfSync lf_row_sync;
-} VP10_COMP;
+ AV1LfSync lf_row_sync;
+} AV1_COMP;
-void vp10_initialize_enc(void);
+void av1_initialize_enc(void);
-struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
+struct AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
BufferPool *const pool);
-void vp10_remove_compressor(VP10_COMP *cpi);
+void av1_remove_compressor(AV1_COMP *cpi);
-void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
+void av1_change_config(AV1_COMP *cpi, const AV1EncoderConfig *oxcf);
// receive a frames worth of data. caller can assume that a copy of this
// frame is made and not just a copy of the pointer..
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time_stamp);
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
size_t *size, uint8_t *dest, int64_t *time_stamp,
int64_t *time_end, int flush);
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest);
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags);
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags);
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags);
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_set_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-int vp10_update_entropy(VP10_COMP *cpi, int update);
+int av1_update_entropy(AV1_COMP *cpi, int update);
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+int av1_set_internal_size(AV1_COMP *cpi, VPX_SCALING horiz_mode,
VPX_SCALING vert_mode);
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
unsigned int height);
-int vp10_get_quantizer(struct VP10_COMP *cpi);
+int av1_get_quantizer(struct AV1_COMP *cpi);
-static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
+static INLINE int frame_is_kf_gf_arf(const AV1_COMP *cpi) {
return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
}
-static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi,
+static INLINE int get_ref_frame_map_idx(const AV1_COMP *cpi,
MV_REFERENCE_FRAME ref_frame) {
if (ref_frame == LAST_FRAME) {
return cpi->lst_fb_idx;
@@ -547,16 +547,16 @@
}
}
-static INLINE int get_ref_frame_buf_idx(const VP10_COMP *const cpi,
+static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
int ref_frame) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : INVALID_IDX;
}
static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
- VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+ AV1_COMMON *const cm = &cpi->common;
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
: NULL;
@@ -580,37 +580,37 @@
return get_token_alloc(tile_mb_rows, tile_mb_cols);
}
-int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
+int64_t av1_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b);
-#if CONFIG_VPX_HIGHBITDEPTH
-int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_alloc_compressor_data(VP10_COMP *cpi);
+void av1_alloc_compressor_data(AV1_COMP *cpi);
-void vp10_scale_references(VP10_COMP *cpi);
+void av1_scale_references(AV1_COMP *cpi);
-void vp10_update_reference_frames(VP10_COMP *cpi);
+void av1_update_reference_frames(AV1_COMP *cpi);
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv);
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
YV12_BUFFER_CONFIG *unscaled,
YV12_BUFFER_CONFIG *scaled);
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
YV12_BUFFER_CONFIG *unscaled,
YV12_BUFFER_CONFIG *scaled);
-void vp10_apply_encoding_flags(VP10_COMP *cpi, aom_enc_frame_flags_t flags);
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags);
-static INLINE int is_altref_enabled(const VP10_COMP *const cpi) {
+static INLINE int is_altref_enabled(const AV1_COMP *const cpi) {
return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 &&
cpi->oxcf.enable_auto_arf;
}
-static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void set_ref_ptrs(AV1_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
xd->block_refs[0] =
@@ -623,11 +623,11 @@
return frame_index & 0x1;
}
-static INLINE int *cond_cost_list(const struct VP10_COMP *cpi, int *cost_list) {
+static INLINE int *cond_cost_list(const struct AV1_COMP *cpi, int *cost_list) {
return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL;
}
-void vp10_new_framerate(VP10_COMP *cpi, double framerate);
+void av1_new_framerate(AV1_COMP *cpi, double framerate);
#define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl))
@@ -635,4 +635,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_ENCODER_H_
+#endif // AV1_ENCODER_ENCODER_H_
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index 63956c4..371bf25 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -38,8 +38,8 @@
}
static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
- VP10_COMP *const cpi = thread_data->cpi;
- const VP10_COMMON *const cm = &cpi->common;
+ AV1_COMP *const cpi = thread_data->cpi;
+ const AV1_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
int t;
@@ -51,20 +51,20 @@
int tile_row = t / tile_cols;
int tile_col = t % tile_cols;
- vp10_encode_tile(cpi, thread_data->td, tile_row, tile_col);
+ av1_encode_tile(cpi, thread_data->td, tile_row, tile_col);
}
return 0;
}
-void vp10_encode_tiles_mt(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_encode_tiles_mt(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
const VPxWorkerInterface *const winterface = aom_get_worker_interface();
const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
int i;
- vp10_init_tile_data(cpi);
+ av1_init_tile_data(cpi);
// Only run once to create threads and allocate thread data.
if (cpi->num_workers == 0) {
@@ -89,12 +89,12 @@
// Allocate thread data.
CHECK_MEM_ERROR(cm, thread_data->td,
aom_memalign(32, sizeof(*thread_data->td)));
- vp10_zero(*thread_data->td);
+ av1_zero(*thread_data->td);
// Set up pc_tree.
thread_data->td->leaf_tree = NULL;
thread_data->td->pc_tree = NULL;
- vp10_setup_pc_tree(cm, thread_data->td);
+ av1_setup_pc_tree(cm, thread_data->td);
// Allocate frame counters in thread data.
CHECK_MEM_ERROR(cm, thread_data->td->counts,
@@ -160,7 +160,7 @@
// Accumulate counters.
if (i < cpi->num_workers - 1) {
- vp10_accumulate_frame_counts(cm, thread_data->td->counts, 0);
+ av1_accumulate_frame_counts(cm, thread_data->td->counts, 0);
accumulate_rd_opt(&cpi->td, thread_data->td);
}
}
diff --git a/av1/encoder/ethread.h b/av1/encoder/ethread.h
index 7403bd3..6c30a3e 100644
--- a/av1/encoder/ethread.h
+++ b/av1/encoder/ethread.h
@@ -9,26 +9,26 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_ETHREAD_H_
-#define VP10_ENCODER_ETHREAD_H_
+#ifndef AV1_ENCODER_ETHREAD_H_
+#define AV1_ENCODER_ETHREAD_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10_COMP;
+struct AV1_COMP;
struct ThreadData;
typedef struct EncWorkerData {
- struct VP10_COMP *cpi;
+ struct AV1_COMP *cpi;
struct ThreadData *td;
int start;
} EncWorkerData;
-void vp10_encode_tiles_mt(struct VP10_COMP *cpi);
+void av1_encode_tiles_mt(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_ETHREAD_H_
+#endif // AV1_ENCODER_ETHREAD_H_
diff --git a/av1/encoder/extend.c b/av1/encoder/extend.c
index efb6515..c7aaa49 100644
--- a/av1/encoder/extend.c
+++ b/av1/encoder/extend.c
@@ -57,7 +57,7 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
uint8_t *dst8, int dst_pitch, int w,
int h, int extend_top, int extend_left,
@@ -100,9 +100,9 @@
dst_ptr2 += dst_pitch;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst) {
// Extend src frame in buffer
// Altref filtering assumes 16 pixel extension
@@ -124,7 +124,7 @@
const int eb_uv = eb_y >> uv_height_subsampling;
const int er_uv = er_y >> uv_width_subsampling;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
dst->y_stride, src->y_crop_width,
@@ -139,7 +139,7 @@
src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
dst->y_stride, src->y_crop_width, src->y_crop_height,
@@ -154,7 +154,7 @@
et_uv, el_uv, eb_uv, er_uv);
}
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int srcy,
int srcx, int srch, int srcw) {
// If the side is not touching the bounder then don't extend.
diff --git a/av1/encoder/extend.h b/av1/encoder/extend.h
index ba94a6df..ccd8654 100644
--- a/av1/encoder/extend.h
+++ b/av1/encoder/extend.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_EXTEND_H_
-#define VP10_ENCODER_EXTEND_H_
+#ifndef AV1_ENCODER_EXTEND_H_
+#define AV1_ENCODER_EXTEND_H_
#include "aom_scale/yv12config.h"
#include "aom/aom_integer.h"
@@ -19,14 +19,14 @@
extern "C" {
#endif
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst);
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int srcy,
int srcx, int srch, int srcw);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_EXTEND_H_
+#endif // AV1_ENCODER_EXTEND_H_
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
index 13b888d..2c22bbe 100644
--- a/av1/encoder/firstpass.c
+++ b/av1/encoder/firstpass.c
@@ -25,7 +25,7 @@
#include "av1/common/entropymv.h"
#include "av1/common/quant_common.h"
-#include "av1/common/reconinter.h" // vp10_setup_dst_planes()
+#include "av1/common/reconinter.h" // av1_setup_dst_planes()
#include "av1/encoder/aq_variance.h"
#include "av1/encoder/block.h"
#include "av1/encoder/encodeframe.h"
@@ -127,7 +127,7 @@
}
#if CONFIG_FP_MB_STATS
-static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, AV1_COMMON *cm,
struct aom_codec_pkt_list *pktlist) {
struct aom_codec_cx_pkt pkt;
pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
@@ -218,7 +218,7 @@
// bars and partially discounts other 0 energy areas.
#define MIN_ACTIVE_AREA 0.5
#define MAX_ACTIVE_AREA 1.0
-static double calculate_active_area(const VP10_COMP *cpi,
+static double calculate_active_area(const AV1_COMP *cpi,
const FIRSTPASS_STATS *this_frame) {
double active_pct;
@@ -232,9 +232,9 @@
// Calculate a modified Error used in distributing bits between easier and
// harder frames.
#define ACT_AREA_CORRECTION 0.5
-static double calculate_modified_err(const VP10_COMP *cpi,
+static double calculate_modified_err(const AV1_COMP *cpi,
const TWO_PASS *twopass,
- const VP10EncoderConfig *oxcf,
+ const AV1EncoderConfig *oxcf,
const FIRSTPASS_STATS *this_frame) {
const FIRSTPASS_STATS *const stats = &twopass->total_stats;
const double av_weight = stats->weight / stats->count;
@@ -258,7 +258,7 @@
// This function returns the maximum target rate per frame.
static int frame_max_bits(const RATE_CONTROL *rc,
- const VP10EncoderConfig *oxcf) {
+ const AV1EncoderConfig *oxcf) {
int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
(int64_t)oxcf->two_pass_vbrmax_section) /
100;
@@ -270,11 +270,11 @@
return (int)max_bits;
}
-void vp10_init_first_pass(VP10_COMP *cpi) {
+void av1_init_first_pass(AV1_COMP *cpi) {
zero_stats(&cpi->twopass.total_stats);
}
-void vp10_end_first_pass(VP10_COMP *cpi) {
+void av1_end_first_pass(AV1_COMP *cpi) {
output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
}
@@ -296,7 +296,7 @@
return sse;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static aom_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
int bd) {
switch (bd) {
@@ -336,11 +336,11 @@
fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
return sse;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Refine the motion search range according to the frame dimension
// for first pass test.
-static int get_search_range(const VP10_COMP *cpi) {
+static int get_search_range(const AV1_COMP *cpi) {
int sr = 0;
const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
@@ -348,7 +348,7 @@
return sr;
}
-static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void first_pass_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
const MV *ref_mv, MV *best_mv,
int *best_motion_err) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -367,18 +367,18 @@
// Override the default variance function to use MSE.
v_fn_ptr.vf = get_block_variance_fn(bsize);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
v_fn_ptr.vf = highbd_get_block_variance_fn(bsize, xd->bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Center the initial step/diamond search on best mv.
tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
step_param, x->sadperbit16, &num00,
&v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
- tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+ tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
if (tmp_err < *best_motion_err) {
@@ -400,7 +400,7 @@
step_param + n, x->sadperbit16, &num00,
&v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
- tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+ tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
if (tmp_err < INT_MAX - new_mv_mode_penalty)
tmp_err += new_mv_mode_penalty;
@@ -412,7 +412,7 @@
}
}
-static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
+static BLOCK_SIZE get_bsize(const AV1_COMMON *cm, int mb_row, int mb_col) {
if (2 * mb_col + 1 < cm->mi_cols) {
return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
} else {
@@ -424,15 +424,15 @@
int i;
for (i = 0; i < QINDEX_RANGE; ++i)
- if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
+ if (av1_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
if (i == QINDEX_RANGE) i--;
return i;
}
-static void set_first_pass_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void set_first_pass_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
if (!cpi->refresh_alt_ref_frame &&
(cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
cm->frame_type = KEY_FRAME;
@@ -445,10 +445,10 @@
#define UL_INTRA_THRESH 50
#define INVALID_ROW -1
-void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
+void av1_first_pass(AV1_COMP *cpi, const struct lookahead_entry *source) {
int mb_row, mb_col;
MACROBLOCK *const x = &cpi->td.mb;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
TileInfo tile;
struct macroblock_plane *const p = x->plane;
@@ -492,7 +492,7 @@
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
- vp10_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
+ av1_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
}
#endif
@@ -503,21 +503,21 @@
neutral_count = 0.0;
set_first_pass_params(cpi);
- vp10_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
+ av1_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
- vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
- vp10_setup_src_planes(x, cpi->Source, 0, 0);
- vp10_setup_dst_planes(xd->plane, new_yv12, 0, 0);
+ av1_setup_src_planes(x, cpi->Source, 0, 0);
+ av1_setup_dst_planes(xd->plane, new_yv12, 0, 0);
if (!frame_is_intra_only(cm)) {
- vp10_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
+ av1_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
}
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
- vp10_frame_init_quantizer(cpi);
+ av1_frame_init_quantizer(cpi);
for (i = 0; i < MAX_MB_PLANE; ++i) {
p[i].coeff = ctx->coeff_pbuf[i][1];
@@ -527,11 +527,11 @@
}
x->skip_recode = 0;
- vp10_init_mv_probs(cm);
- vp10_initialize_rd_consts(cpi);
+ av1_init_mv_probs(cm);
+ av1_initialize_rd_consts(cpi);
// Tiling is ignored in the first pass.
- vp10_tile_init(&tile, cm, 0, 0);
+ av1_tile_init(&tile, cm, 0, 0);
recon_y_stride = new_yv12->y_stride;
recon_uv_stride = new_yv12->uv_stride;
@@ -578,7 +578,7 @@
xd->mi[0]->mbmi.mode = DC_PRED;
xd->mi[0]->mbmi.tx_size =
use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
- vp10_encode_intra_block_plane(x, bsize, 0);
+ av1_encode_intra_block_plane(x, bsize, 0);
this_error = aom_get_mb_ss(x->plane[0].src_diff);
// Keep a record of blocks that have almost no intra error residual
@@ -592,7 +592,7 @@
image_data_start_row = mb_row;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
case VPX_BITS_8: break;
@@ -605,7 +605,7 @@
return;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
aom_clear_system_state();
log_intra = log(this_error + 1.0);
@@ -614,7 +614,7 @@
else
intra_factor += 1.0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth)
level_sample = CONVERT_TO_SHORTPTR(x->plane[0].src.buf)[0];
else
@@ -659,7 +659,7 @@
struct buf_2d unscaled_last_source_buf_2d;
xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -670,7 +670,7 @@
#else
motion_error =
get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Compute the motion error of the 0,0 motion using the last source
// frame as the reference. Skip the further motion search on
@@ -679,7 +679,7 @@
cpi->unscaled_last_source->y_buffer + recon_yoffset;
unscaled_last_source_buf_2d.stride =
cpi->unscaled_last_source->y_stride;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
raw_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
@@ -690,7 +690,7 @@
#else
raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
&unscaled_last_source_buf_2d);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// TODO(pengchong): Replace the hard-coded threshold
if (raw_motion_error > 25) {
@@ -716,7 +716,7 @@
int gf_motion_error;
xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
gf_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -727,7 +727,7 @@
#else
gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
&xd->plane[0].pre[0]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
&gf_motion_error);
@@ -798,8 +798,8 @@
xd->mi[0]->mbmi.tx_size = TX_4X4;
xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
xd->mi[0]->mbmi.ref_frame[1] = NONE;
- vp10_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
- vp10_encode_sby_pass1(x, bsize);
+ av1_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
+ av1_encode_sby_pass1(x, bsize);
sum_mvr += mv.row;
sum_mvr_abs += abs(mv.row);
sum_mvc += mv.col;
@@ -1047,7 +1047,7 @@
// Adjustment based on actual quantizer to power term.
const double power_term =
- VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
+ VPXMIN(av1_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
// Calculate correction factor.
if (power_term < 1.0) assert(error_term >= 0.0);
@@ -1060,13 +1060,13 @@
// increased size and hence coding cost of motion vectors.
#define EDIV_SIZE_FACTOR 800
-static int get_twopass_worst_quality(const VP10_COMP *cpi,
+static int get_twopass_worst_quality(const AV1_COMP *cpi,
const double section_err,
double inactive_zone,
int section_target_bandwidth,
double group_weight_factor) {
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
inactive_zone = fclamp(inactive_zone, 0.0, 1.0);
@@ -1091,7 +1091,7 @@
const double factor = calc_correction_factor(
av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
FACTOR_PT_HIGH, q, cpi->common.bit_depth);
- const int bits_per_mb = vp10_rc_bits_per_mb(
+ const int bits_per_mb = av1_rc_bits_per_mb(
INTER_FRAME, q, factor * speed_term * group_weight_factor,
cpi->common.bit_depth);
if (bits_per_mb <= target_norm_bits_per_mb) break;
@@ -1103,17 +1103,17 @@
}
}
-static void setup_rf_level_maxq(VP10_COMP *cpi) {
+static void setup_rf_level_maxq(AV1_COMP *cpi) {
int i;
RATE_CONTROL *const rc = &cpi->rc;
for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) {
- int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality);
+ int qdelta = av1_frame_type_qdelta(cpi, i, rc->worst_quality);
rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality);
}
}
-void vp10_init_subsampling(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_init_subsampling(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
const int w = cm->width;
const int h = cm->height;
@@ -1128,15 +1128,15 @@
setup_rf_level_maxq(cpi);
}
-void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
+void av1_calculate_coded_size(AV1_COMP *cpi, int *scaled_frame_width,
int *scaled_frame_height) {
RATE_CONTROL *const rc = &cpi->rc;
*scaled_frame_width = rc->frame_width[rc->frame_size_selector];
*scaled_frame_height = rc->frame_height[rc->frame_size_selector];
}
-void vp10_init_second_pass(VP10_COMP *cpi) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_init_second_pass(AV1_COMP *cpi) {
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
TWO_PASS *const twopass = &cpi->twopass;
double frame_rate;
FIRSTPASS_STATS *stats;
@@ -1157,7 +1157,7 @@
// encoded in the second pass is a guess. However, the sum duration is not.
// It is calculated based on the actual durations of all frames from the
// first pass.
- vp10_new_framerate(cpi, frame_rate);
+ av1_new_framerate(cpi, frame_rate);
twopass->bits_left =
(int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
@@ -1193,7 +1193,7 @@
twopass->last_kfgroup_zeromotion_pct = 100;
if (oxcf->resize_mode != RESIZE_NONE) {
- vp10_init_subsampling(cpi);
+ av1_init_subsampling(cpi);
}
}
@@ -1204,7 +1204,7 @@
#define LOW_SR_DIFF_TRHESH 0.1
#define SR_DIFF_MAX 128.0
-static double get_sr_decay_rate(const VP10_COMP *cpi,
+static double get_sr_decay_rate(const AV1_COMP *cpi,
const FIRSTPASS_STATS *frame) {
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
: cpi->common.MBs;
@@ -1233,7 +1233,7 @@
// This function gives an estimate of how badly we believe the prediction
// quality is decaying from frame to frame.
-static double get_zero_motion_factor(const VP10_COMP *cpi,
+static double get_zero_motion_factor(const AV1_COMP *cpi,
const FIRSTPASS_STATS *frame) {
const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
double sr_decay = get_sr_decay_rate(cpi, frame);
@@ -1242,7 +1242,7 @@
#define ZM_POWER_FACTOR 0.75
-static double get_prediction_decay_rate(const VP10_COMP *cpi,
+static double get_prediction_decay_rate(const AV1_COMP *cpi,
const FIRSTPASS_STATS *next_frame) {
const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
const double zero_motion_factor =
@@ -1256,7 +1256,7 @@
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+static int detect_transition_to_still(AV1_COMP *cpi, int frame_interval,
int still_interval,
double loop_decay_rate,
double last_decay_rate) {
@@ -1330,11 +1330,11 @@
}
#define BASELINE_ERR_PER_MB 1000.0
-static double calc_frame_boost(VP10_COMP *cpi,
+static double calc_frame_boost(AV1_COMP *cpi,
const FIRSTPASS_STATS *this_frame,
double this_frame_mv_in_out, double max_boost) {
double frame_boost;
- const double lq = vp10_convert_qindex_to_q(
+ const double lq = av1_convert_qindex_to_q(
cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
@@ -1360,7 +1360,7 @@
return VPXMIN(frame_boost, max_boost * boost_q_correction);
}
-static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
+static int calc_arf_boost(AV1_COMP *cpi, int offset, int f_frames,
int b_frames, int *f_boost, int *b_boost) {
TWO_PASS *const twopass = &cpi->twopass;
int i;
@@ -1468,7 +1468,7 @@
}
// Calculate the total bits to allocate in this GF/ARF group.
-static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
+static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
double gf_group_err) {
const RATE_CONTROL *const rc = &cpi->rc;
const TWO_PASS *const twopass = &cpi->twopass;
@@ -1530,10 +1530,10 @@
arf_buffer_indices[1] = ARF_SLOT2;
}
-static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
+static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
double group_error, int gf_arf_bits) {
RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
FIRSTPASS_STATS frame_stats;
@@ -1645,7 +1645,7 @@
// Note:
// We need to configure the frame at the end of the sequence + 1 that will be
// the start frame for the next group. Otherwise prior to the call to
- // vp10_rc_get_second_pass_params() the data will be undefined.
+ // av1_rc_get_second_pass_params() the data will be undefined.
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
@@ -1670,10 +1670,10 @@
}
// Analyse and define a gf/arf group.
-static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
- VP10_COMMON *const cm = &cpi->common;
+static void define_gf_group(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
- VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ AV1EncoderConfig *const oxcf = &cpi->oxcf;
TWO_PASS *const twopass = &cpi->twopass;
FIRSTPASS_STATS next_frame;
const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
@@ -1717,11 +1717,11 @@
// Reset the GF group data structures unless this is a key
// frame in which case it will already have been done.
if (is_key_frame == 0) {
- vp10_zero(twopass->gf_group);
+ av1_zero(twopass->gf_group);
}
aom_clear_system_state();
- vp10_zero(next_frame);
+ av1_zero(next_frame);
// Load stats for the current frame.
mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
@@ -1748,9 +1748,9 @@
// Set a maximum and minimum interval for the GF group.
// If the image appears almost completely static we can extend beyond this.
{
- int int_max_q = (int)(vp10_convert_qindex_to_q(
+ int int_max_q = (int)(av1_convert_qindex_to_q(
twopass->active_worst_quality, cpi->common.bit_depth));
- int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
+ int int_lbq = (int)(av1_convert_qindex_to_q(rc->last_boosted_qindex,
cpi->common.bit_depth));
active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
if (active_min_gf_interval > rc->max_gf_interval)
@@ -2061,12 +2061,12 @@
return is_viable_kf;
}
-static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int i, j;
RATE_CONTROL *const rc = &cpi->rc;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const FIRSTPASS_STATS first_frame = *this_frame;
const FIRSTPASS_STATS *const start_position = twopass->stats_in;
FIRSTPASS_STATS next_frame;
@@ -2081,12 +2081,12 @@
double kf_group_err = 0.0;
double recent_loop_decay[8] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
- vp10_zero(next_frame);
+ av1_zero(next_frame);
cpi->common.frame_type = KEY_FRAME;
// Reset the GF group data structures.
- vp10_zero(*gf_group);
+ av1_zero(*gf_group);
// Is this a forced key frame by interval.
rc->this_key_frame_forced = rc->next_key_frame_forced;
@@ -2293,7 +2293,7 @@
}
// Define the reference buffers that will be updated post encode.
-static void configure_buffer_updates(VP10_COMP *cpi) {
+static void configure_buffer_updates(AV1_COMP *cpi) {
TWO_PASS *const twopass = &cpi->twopass;
cpi->rc.is_src_frame_alt_ref = 0;
@@ -2328,7 +2328,7 @@
}
}
-static int is_skippable_frame(const VP10_COMP *cpi) {
+static int is_skippable_frame(const AV1_COMP *cpi) {
// If the current frame does not have non-zero motion vector detected in the
// first pass, and so do its previous and forward frames, then this frame
// can be skipped for partition check, and the partition size is assigned
@@ -2347,8 +2347,8 @@
twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
}
-void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_second_pass_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
@@ -2367,7 +2367,7 @@
int target_rate;
configure_buffer_updates(cpi);
target_rate = gf_group->bit_allocation[gf_group->index];
- target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+ target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
rc->base_frame_target = target_rate;
cm->frame_type = INTER_FRAME;
@@ -2405,12 +2405,12 @@
twopass->baseline_active_worst_quality = tmp_q;
rc->ni_av_qi = tmp_q;
rc->last_q[INTER_FRAME] = tmp_q;
- rc->avg_q = vp10_convert_qindex_to_q(tmp_q, cm->bit_depth);
+ rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->bit_depth);
rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.best_allowed_q) / 2;
rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
}
- vp10_zero(this_frame);
+ av1_zero(this_frame);
if (EOF == input_stats(twopass, &this_frame)) return;
// Set the frame content type flag.
@@ -2460,9 +2460,9 @@
target_rate = gf_group->bit_allocation[gf_group->index];
if (cpi->common.frame_type == KEY_FRAME)
- target_rate = vp10_rc_clamp_iframe_target_size(cpi, target_rate);
+ target_rate = av1_rc_clamp_iframe_target_size(cpi, target_rate);
else
- target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+ target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
rc->base_frame_target = target_rate;
@@ -2483,7 +2483,7 @@
#define MINQ_ADJ_LIMIT 48
#define MINQ_ADJ_LIMIT_CQ 20
#define HIGH_UNDERSHOOT_RATIO 2
-void vp10_twopass_postencode_update(VP10_COMP *cpi) {
+void av1_twopass_postencode_update(AV1_COMP *cpi) {
TWO_PASS *const twopass = &cpi->twopass;
RATE_CONTROL *const rc = &cpi->rc;
const int bits_used = rc->base_frame_target;
diff --git a/av1/encoder/firstpass.h b/av1/encoder/firstpass.h
index 89ce3fb..f5fe329 100644
--- a/av1/encoder/firstpass.h
+++ b/av1/encoder/firstpass.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_FIRSTPASS_H_
-#define VP10_ENCODER_FIRSTPASS_H_
+#ifndef AV1_ENCODER_FIRSTPASS_H_
+#define AV1_ENCODER_FIRSTPASS_H_
#include "av1/encoder/lookahead.h"
#include "av1/encoder/ratectrl.h"
@@ -139,28 +139,28 @@
GF_GROUP gf_group;
} TWO_PASS;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_init_first_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_first_pass_params(struct VP10_COMP *cpi);
-void vp10_first_pass(struct VP10_COMP *cpi,
+void av1_init_first_pass(struct AV1_COMP *cpi);
+void av1_rc_get_first_pass_params(struct AV1_COMP *cpi);
+void av1_first_pass(struct AV1_COMP *cpi,
const struct lookahead_entry *source);
-void vp10_end_first_pass(struct VP10_COMP *cpi);
+void av1_end_first_pass(struct AV1_COMP *cpi);
-void vp10_init_second_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_second_pass_params(struct VP10_COMP *cpi);
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_init_second_pass(struct AV1_COMP *cpi);
+void av1_rc_get_second_pass_params(struct AV1_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
// Post encode update of the rate control parameters for 2-pass
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
-void vp10_init_subsampling(struct VP10_COMP *cpi);
+void av1_init_subsampling(struct AV1_COMP *cpi);
-void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
+void av1_calculate_coded_size(struct AV1_COMP *cpi, int *scaled_frame_width,
int *scaled_frame_height);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_FIRSTPASS_H_
+#endif // AV1_ENCODER_FIRSTPASS_H_
diff --git a/av1/encoder/lookahead.c b/av1/encoder/lookahead.c
index 8206a5e..d245466 100644
--- a/av1/encoder/lookahead.c
+++ b/av1/encoder/lookahead.c
@@ -31,7 +31,7 @@
return buf;
}
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
+void av1_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx) {
if (ctx->buf) {
unsigned int i;
@@ -43,11 +43,11 @@
}
}
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
unsigned int height,
unsigned int subsampling_x,
unsigned int subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
unsigned int depth) {
@@ -70,7 +70,7 @@
for (i = 0; i < depth; i++)
if (aom_alloc_frame_buffer(
&ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
@@ -78,15 +78,15 @@
}
return ctx;
bail:
- vp10_lookahead_destroy(ctx);
+ av1_lookahead_destroy(ctx);
return NULL;
}
#define USE_PARTIAL_COPY 0
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
unsigned int flags) {
@@ -119,7 +119,7 @@
#if USE_PARTIAL_COPY
// TODO(jkoleszar): This is disabled for now, as
- // vp10_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+ // av1_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
// Only do this partial copy if the following conditions are all met:
// 1. Lookahead queue has has size of 1.
@@ -146,7 +146,7 @@
}
// Only copy this active region.
- vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+ av1_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
16, (active_end - col) << 4);
// Start again from the end of this active region.
@@ -162,7 +162,7 @@
memset(&new_img, 0, sizeof(new_img));
if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x,
subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS, 0))
@@ -178,7 +178,7 @@
buf->img.subsampling_y = src->subsampling_y;
}
// Partial copy not implemented yet
- vp10_copy_and_extend_frame(src, &buf->img);
+ av1_copy_and_extend_frame(src, &buf->img);
#if USE_PARTIAL_COPY
}
#endif
@@ -189,7 +189,7 @@
return 0;
}
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx,
int drain) {
struct lookahead_entry *buf = NULL;
@@ -200,7 +200,7 @@
return buf;
}
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
int index) {
struct lookahead_entry *buf = NULL;
@@ -223,4 +223,4 @@
return buf;
}
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/av1/encoder/lookahead.h b/av1/encoder/lookahead.h
index 148809a..b0e33ee 100644
--- a/av1/encoder/lookahead.h
+++ b/av1/encoder/lookahead.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_LOOKAHEAD_H_
-#define VP10_ENCODER_LOOKAHEAD_H_
+#ifndef AV1_ENCODER_LOOKAHEAD_H_
+#define AV1_ENCODER_LOOKAHEAD_H_
#include "aom_scale/yv12config.h"
#include "aom/aom_integer.h"
@@ -44,18 +44,18 @@
* The lookahead stage is a queue of frame buffers on which some analysis
* may be done when buffers are enqueued.
*/
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
unsigned int height,
unsigned int subsampling_x,
unsigned int subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
unsigned int depth);
/**\brief Destroys the lookahead stage
*/
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
+void av1_lookahead_destroy(struct lookahead_ctx *ctx);
/**\brief Enqueue a source buffer
*
@@ -72,9 +72,9 @@
* \param[in] flags Flags set on this frame
* \param[in] active_map Map that specifies which macroblock is active
*/
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int use_highbitdepth,
#endif
unsigned int flags);
@@ -89,7 +89,7 @@
* \retval NULL, if drain set and queue is empty
* \retval NULL, if drain not set and queue not of the configured depth
*/
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx,
int drain);
/**\brief Get a future source buffer to encode
@@ -99,17 +99,17 @@
*
* \retval NULL, if no buffer exists at the specified index
*/
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
int index);
/**\brief Get the number of frames currently in the lookahead queue
*
* \param[in] ctx Pointer to the lookahead context
*/
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx);
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_LOOKAHEAD_H_
+#endif // AV1_ENCODER_LOOKAHEAD_H_
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
index 80d2aef..2196a0e 100644
--- a/av1/encoder/mbgraph.c
+++ b/av1/encoder/mbgraph.c
@@ -23,7 +23,7 @@
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
-static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+static unsigned int do_16x16_motion_iteration(AV1_COMP *cpi, const MV *ref_mv,
MV *dst_mv, int mb_row,
int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
@@ -42,13 +42,13 @@
int step_param = mv_sf->reduce_first_step_size;
step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
- vp10_set_mv_search_range(x, ref_mv);
+ av1_set_mv_search_range(x, ref_mv);
ref_full.col = ref_mv->col >> 3;
ref_full.row = ref_mv->row >> 3;
/*cpi->sf.search_method == HEX*/
- vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
+ av1_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv, dst_mv);
// Try sub-pixel MC
@@ -66,7 +66,7 @@
xd->mi[0]->mbmi.mode = NEWMV;
xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;
- vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
+ av1_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
/* restore UMV window */
x->mv_col_min = tmp_col_min;
@@ -78,7 +78,7 @@
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
}
-static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv,
+static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv,
int_mv *dst_mv, int mb_row, int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -116,7 +116,7 @@
return err;
}
-static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
+static int do_16x16_zerozero_search(AV1_COMP *cpi, int_mv *dst_mv) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
unsigned int err;
@@ -130,7 +130,7 @@
return err;
}
-static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
+static int find_best_16x16_intra(AV1_COMP *cpi, PREDICTION_MODE *pbest_mode) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
PREDICTION_MODE best_mode = -1, mode;
@@ -142,7 +142,7 @@
unsigned int err;
xd->mi[0]->mbmi.mode = mode;
- vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+ av1_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
x->plane[0].src.stride, xd->plane[0].dst.buf,
xd->plane[0].dst.stride, 0, 0, 0);
err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
@@ -160,7 +160,7 @@
return best_err;
}
-static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+static void update_mbgraph_mb_stats(AV1_COMP *cpi, MBGRAPH_MB_STATS *stats,
YV12_BUFFER_CONFIG *buf, int mb_y_offset,
YV12_BUFFER_CONFIG *golden_ref,
const MV *prev_golden_ref_mv,
@@ -169,7 +169,7 @@
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
// FIXME in practice we're completely ignoring chroma here
x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
@@ -213,21 +213,21 @@
}
}
-static void update_mbgraph_frame_stats(VP10_COMP *cpi,
+static void update_mbgraph_frame_stats(AV1_COMP *cpi,
MBGRAPH_FRAME_STATS *stats,
YV12_BUFFER_CONFIG *buf,
YV12_BUFFER_CONFIG *golden_ref,
YV12_BUFFER_CONFIG *alt_ref) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int mb_col, mb_row, offset = 0;
int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
MV gld_top_mv = { 0, 0 };
MODE_INFO mi_local;
- vp10_zero(mi_local);
+ av1_zero(mi_local);
// Set up limit values for motion vectors to prevent them extending outside
// the UMV borders.
x->mv_row_min = -BORDER_MV_PIXELS_B16;
@@ -280,8 +280,8 @@
}
// void separate_arf_mbs_byzz
-static void separate_arf_mbs(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+static void separate_arf_mbs(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
int mb_col, mb_row, offset, i;
int mi_row, mi_col;
int ncnt[4] = { 0 };
@@ -347,19 +347,19 @@
else
cpi->static_mb_pct = 0;
- vp10_enable_segmentation(&cm->seg);
+ av1_enable_segmentation(&cm->seg);
} else {
cpi->static_mb_pct = 0;
- vp10_disable_segmentation(&cm->seg);
+ av1_disable_segmentation(&cm->seg);
}
// Free localy allocated storage
aom_free(arf_not_zz);
}
-void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
- int i, n_frames = vp10_lookahead_depth(cpi->lookahead);
+void av1_update_mbgraph_stats(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
+ int i, n_frames = av1_lookahead_depth(cpi->lookahead);
YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
assert(golden_ref != NULL);
@@ -383,7 +383,7 @@
// the ARF MC search backwards, to get optimal results for MV caching
for (i = 0; i < n_frames; i++) {
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
- struct lookahead_entry *q_cur = vp10_lookahead_peek(cpi->lookahead, i);
+ struct lookahead_entry *q_cur = av1_lookahead_peek(cpi->lookahead, i);
assert(q_cur != NULL);
diff --git a/av1/encoder/mbgraph.h b/av1/encoder/mbgraph.h
index 8c034da..db005e1 100644
--- a/av1/encoder/mbgraph.h
+++ b/av1/encoder/mbgraph.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_MBGRAPH_H_
-#define VP10_ENCODER_MBGRAPH_H_
+#ifndef AV1_ENCODER_MBGRAPH_H_
+#define AV1_ENCODER_MBGRAPH_H_
#ifdef __cplusplus
extern "C" {
@@ -28,12 +28,12 @@
typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_update_mbgraph_stats(struct VP10_COMP *cpi);
+void av1_update_mbgraph_stats(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_MBGRAPH_H_
+#endif // AV1_ENCODER_MBGRAPH_H_
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index a82d151..b3690e8 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -33,7 +33,7 @@
return &buf->buf[mv->row * buf->stride + mv->col];
}
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
@@ -52,7 +52,7 @@
if (x->mv_row_max > row_max) x->mv_row_max = row_max;
}
-int vp10_init_search_range(int size) {
+int av1_init_search_range(int size) {
int sr = 0;
// Minimum search size no matter what the passed in value.
size = VPXMAX(16, size);
@@ -65,11 +65,11 @@
static INLINE int mv_cost(const MV *mv, const int *joint_cost,
int *const comp_cost[2]) {
- return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+ return joint_cost[av1_get_mv_joint(mv)] + comp_cost[0][mv->row] +
comp_cost[1][mv->col];
}
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
int *mvcost[2], int weight) {
const MV diff = { mv->row - ref->row, mv->col - ref->col };
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
@@ -98,7 +98,7 @@
VP9_PROB_COST_SHIFT);
}
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride) {
int len, ss_count = 1;
cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -119,7 +119,7 @@
cfg->searches_per_step = 4;
}
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
+void av1_init3smotion_compensation(search_site_config *cfg, int stride) {
int len, ss_count = 1;
cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -296,7 +296,7 @@
int y_stride, const uint8_t *second_pred, int w, int h, int offset,
int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
unsigned int besterr;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (second_pred != NULL) {
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
@@ -325,7 +325,7 @@
}
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return besterr;
}
@@ -353,7 +353,7 @@
(cost_list[4] - 2 * cost_list[0] + cost_list[2]));
}
-int vp10_find_best_sub_pixel_tree_pruned_evenmore(
+int av1_find_best_sub_pixel_tree_pruned_evenmore(
const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
@@ -404,7 +404,7 @@
tr = br;
tc = bc;
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
hstep >>= 1;
FIRST_LEVEL_CHECKS;
if (eighthiters > 1) {
@@ -422,7 +422,7 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree_pruned_more(
+int av1_find_best_sub_pixel_tree_pruned_more(
const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
@@ -462,7 +462,7 @@
}
}
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
tr = br;
tc = bc;
hstep >>= 1;
@@ -486,7 +486,7 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree_pruned(
+int av1_find_best_sub_pixel_tree_pruned(
const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
@@ -548,7 +548,7 @@
tc = bc;
}
- if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+ if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
hstep >>= 1;
FIRST_LEVEL_CHECKS;
if (eighthiters > 1) {
@@ -578,7 +578,7 @@
{ -2, 0 }, { 2, 0 }, { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
};
-int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
+int av1_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
const MV *ref_mv, int allow_hp,
int error_per_bit,
const aom_variance_fn_ptr_t *vfp,
@@ -614,7 +614,7 @@
unsigned int cost_array[5];
int kr, kc;
- if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+ if (!(allow_hp && av1_use_mv_hp(ref_mv)))
if (round == 3) round = 2;
bestmv->row *= 8;
@@ -801,7 +801,7 @@
// candidates as indicated in the num_candidates and candidates arrays
// passed into this function
//
-static int vp10_pattern_search(
+static int av1_pattern_search(
const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
int do_init_search, int *cost_list, const aom_variance_fn_ptr_t *vfp,
int use_mvcost, const MV *center_mv, MV *best_mv,
@@ -970,7 +970,7 @@
// are 4 1-away neighbors, and cost_list is non-null
// TODO(debargha): Merge this function with the one above. Also remove
// use_mvcost option since it is always 1, to save unnecessary branches.
-static int vp10_pattern_search_sad(
+static int av1_pattern_search_sad(
const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
int do_init_search, int *cost_list, const aom_variance_fn_ptr_t *vfp,
int use_mvcost, const MV *center_mv, MV *best_mv,
@@ -1246,7 +1246,7 @@
return bestsad;
}
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1262,7 +1262,7 @@
: 0);
}
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const aom_variance_fn_ptr_t *vfp, int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1278,7 +1278,7 @@
: 0);
}
-int vp10_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv, MV *best_mv) {
@@ -1337,12 +1337,12 @@
{ -512, 1024 },
{ -1024, 0 } },
};
- return vp10_pattern_search(
+ return av1_pattern_search(
x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates);
}
-int vp10_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv, MV *best_mv) {
@@ -1436,12 +1436,12 @@
{ -512, 512 },
{ -1024, 0 } },
};
- return vp10_pattern_search_sad(
+ return av1_pattern_search_sad(
x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates);
}
-int vp10_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv, MV *best_mv) {
@@ -1541,26 +1541,26 @@
{ -1024, 1024 },
{ -1024, 0 } },
};
- return vp10_pattern_search(
+ return av1_pattern_search(
x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates);
}
-int vp10_fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit,
int do_init_search, // must be zero for fast_hex
int *cost_list, const aom_variance_fn_ptr_t *vfp,
int use_mvcost, const MV *center_mv, MV *best_mv) {
- return vp10_hex_search(
+ return av1_hex_search(
x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
}
-int vp10_fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
const aom_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv, MV *best_mv) {
- return vp10_bigdia_search(
+ return av1_bigdia_search(
x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
}
@@ -1655,7 +1655,7 @@
return best_sad;
}
-int vp10_diamond_search_sad_c(const MACROBLOCK *x,
+int av1_diamond_search_sad_c(const MACROBLOCK *x,
const search_site_config *cfg, MV *ref_mv,
MV *best_mv, int search_param, int sad_per_bit,
int *num00, const aom_variance_fn_ptr_t *fn_ptr,
@@ -1865,7 +1865,7 @@
{ -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
};
-unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
+unsigned int av1_int_pro_motion_estimation(const AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int mi_row,
int mi_col) {
MACROBLOCKD *xd = &x->e_mbd;
@@ -1888,7 +1888,7 @@
MV this_mv;
const int norm_factor = 3 + (bw >> 5);
const YV12_BUFFER_CONFIG *scaled_ref_frame =
- vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+ av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
if (scaled_ref_frame) {
int i;
@@ -1896,10 +1896,10 @@
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
- vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+ av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
{
unsigned int this_sad;
tmp_mv->row = 0;
@@ -1997,7 +1997,7 @@
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
-int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+int av1_full_pixel_diamond(const AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
int step_param, int sadpb, int further_steps,
int do_refine, int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
@@ -2007,7 +2007,7 @@
int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
step_param, sadpb, &n, fn_ptr, ref_mv);
if (bestsme < INT_MAX)
- bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+ bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
*dst_mv = temp_mv;
// If there won't be more n-step search, check to see if refining search is
@@ -2024,7 +2024,7 @@
step_param + n, sadpb, &num00, fn_ptr,
ref_mv);
if (thissme < INT_MAX)
- thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+ thissme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
// check to see if refining search is needed.
if (num00 > further_steps - n) do_refine = 0;
@@ -2040,10 +2040,10 @@
if (do_refine) {
const int search_range = 8;
MV best_mv = *dst_mv;
- thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+ thissme = av1_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
ref_mv);
if (thissme < INT_MAX)
- thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
+ thissme = av1_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
if (thissme < bestsme) {
bestsme = thissme;
*dst_mv = best_mv;
@@ -2062,7 +2062,7 @@
#define MIN_INTERVAL 1
// Runs an limited range exhaustive mesh search using a pattern set
// according to the encode speed profile.
-static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
+static int full_pixel_exhaustive(AV1_COMP *cpi, MACROBLOCK *x,
MV *centre_mv_full, int sadpb, int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv) {
@@ -2109,7 +2109,7 @@
}
if (bestsme < INT_MAX)
- bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+ bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
*dst_mv = temp_mv;
// Return cost list.
@@ -2119,7 +2119,7 @@
return bestsme;
}
-int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
+int av1_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
int sad_per_bit, int distance,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv) {
@@ -2154,7 +2154,7 @@
return best_sad;
}
-int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
+int av1_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
int sad_per_bit, int distance,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv) {
@@ -2220,7 +2220,7 @@
return best_sad;
}
-int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
+int av1_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
int sad_per_bit, int distance,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv) {
@@ -2310,7 +2310,7 @@
return best_sad;
}
-int vp10_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+int av1_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv) {
@@ -2385,7 +2385,7 @@
// This function is called when we do joint motion search in comp_inter_inter
// mode.
-int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+int av1_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, const uint8_t *second_pred) {
@@ -2433,7 +2433,7 @@
}
#define MIN_EX_SEARCH_LIMIT 128
-static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
+static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
const SPEED_FEATURES *const sf = &cpi->sf;
const int max_ex =
VPXMAX(MIN_EX_SEARCH_LIMIT,
@@ -2444,7 +2444,7 @@
(*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
}
-int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+int av1_full_pixel_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
MV *mvp_full, int step_param, int error_per_bit,
int *cost_list, const MV *ref_mv, MV *tmp_mv,
int var_max, int rd) {
@@ -2465,27 +2465,27 @@
switch (method) {
case FAST_DIAMOND:
- var = vp10_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
+ var = av1_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case FAST_HEX:
- var = vp10_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
+ var = av1_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case HEX:
- var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
+ var = av1_hex_search(x, mvp_full, step_param, error_per_bit, 1,
cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case SQUARE:
- var = vp10_square_search(x, mvp_full, step_param, error_per_bit, 1,
+ var = av1_square_search(x, mvp_full, step_param, error_per_bit, 1,
cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case BIGDIA:
- var = vp10_bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
+ var = av1_bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case NSTEP:
- var = vp10_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
+ var = av1_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
MAX_MVSEARCH_STEPS - 1 - step_param, 1,
cost_list, fn_ptr, ref_mv, tmp_mv);
@@ -2515,7 +2515,7 @@
}
if (method != NSTEP && rd && var < var_max)
- var = vp10_get_mvpred_var(x, tmp_mv, ref_mv, fn_ptr, 1);
+ var = av1_get_mvpred_var(x, tmp_mv, ref_mv, fn_ptr, 1);
return var;
}
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index d603288..f3516c2 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_MCOMP_H_
-#define VP10_ENCODER_MCOMP_H_
+#ifndef AV1_ENCODER_MCOMP_H_
+#define AV1_ENCODER_MCOMP_H_
#include "av1/encoder/block.h"
#include "aom_dsp/variance.h"
@@ -43,40 +43,40 @@
int searches_per_step;
} search_site_config;
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride);
+void av1_init3smotion_compensation(search_site_config *cfg, int stride);
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv);
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
int *mvcost[2], int weight);
// Utility to compute variance + MV rate cost for a given MV
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
int use_mvcost);
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const aom_variance_fn_ptr_t *vfp, int use_mvcost);
-struct VP10_COMP;
+struct AV1_COMP;
struct SPEED_FEATURES;
-int vp10_init_search_range(int size);
+int av1_init_search_range(int size);
-int vp10_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
+int av1_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
int sad_per_bit, int distance,
const struct aom_variance_vtable *fn_ptr,
const struct mv *center_mv);
// Runs sequence of diamond searches in smaller steps for RD.
-int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
+int av1_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
MV *mvp_full, int step_param, int sadpb,
int further_steps, int do_refine, int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv);
// Perform integral projection based motion estimation.
-unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
+unsigned int av1_int_pro_motion_estimation(const struct AV1_COMP *cpi,
MACROBLOCK *x, BLOCK_SIZE bsize,
int mi_row, int mi_col);
@@ -87,11 +87,11 @@
int use_mvcost, const MV *center_mv,
MV *best_mv);
-integer_mv_pattern_search_fn vp10_hex_search;
-integer_mv_pattern_search_fn vp10_bigdia_search;
-integer_mv_pattern_search_fn vp10_square_search;
-integer_mv_pattern_search_fn vp10_fast_hex_search;
-integer_mv_pattern_search_fn vp10_fast_dia_search;
+integer_mv_pattern_search_fn av1_hex_search;
+integer_mv_pattern_search_fn av1_bigdia_search;
+integer_mv_pattern_search_fn av1_square_search;
+integer_mv_pattern_search_fn av1_fast_hex_search;
+integer_mv_pattern_search_fn av1_fast_dia_search;
typedef int(fractional_mv_step_fp)(
const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
@@ -101,34 +101,34 @@
int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
int h);
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_more;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_evenmore;
-typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+typedef int (*av1_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
int sad_per_bit, int distance,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv);
-typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
+typedef int (*av1_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
int sad_per_bit, int distance,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv);
-typedef int (*vp10_diamond_search_fn_t)(
+typedef int (*av1_diamond_search_fn_t)(
const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
int search_param, int sad_per_bit, int *num00,
const aom_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
-int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+int av1_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, const uint8_t *second_pred);
-struct VP10_COMP;
+struct AV1_COMP;
-int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
+int av1_full_pixel_search(struct AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, MV *mvp_full, int step_param,
int error_per_bit, int *cost_list, const MV *ref_mv,
MV *tmp_mv, int var_max, int rd);
@@ -137,4 +137,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_MCOMP_H_
+#endif // AV1_ENCODER_MCOMP_H_
diff --git a/av1/encoder/mips/msa/error_msa.c b/av1/encoder/mips/msa/error_msa.c
index 6707976..1890528 100644
--- a/av1/encoder/mips/msa/error_msa.c
+++ b/av1/encoder/mips/msa/error_msa.c
@@ -87,7 +87,7 @@
BLOCK_ERROR_BLOCKSIZE_MSA(1024)
/* clang-format on */
-int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
+int64_t av1_block_error_msa(const tran_low_t *coeff_ptr,
const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
int64_t *ssz) {
int64_t err;
@@ -100,7 +100,7 @@
case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
default:
- err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
+ err = av1_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
break;
}
diff --git a/av1/encoder/mips/msa/fdct16x16_msa.c b/av1/encoder/mips/msa/fdct16x16_msa.c
index 2664dd6..469b0f9 100644
--- a/av1/encoder/mips/msa/fdct16x16_msa.c
+++ b/av1/encoder/mips/msa/fdct16x16_msa.c
@@ -404,7 +404,7 @@
ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
}
-void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+void av1_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
int32_t tx_type) {
DECLARE_ALIGNED(32, int16_t, tmp[256]);
DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
diff --git a/av1/encoder/mips/msa/fdct4x4_msa.c b/av1/encoder/mips/msa/fdct4x4_msa.c
index c60c859..1bc7fe4 100644
--- a/av1/encoder/mips/msa/fdct4x4_msa.c
+++ b/av1/encoder/mips/msa/fdct4x4_msa.c
@@ -14,7 +14,7 @@
#include "av1/common/enums.h"
#include "av1/encoder/mips/msa/fdct_msa.h"
-void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
+void av1_fwht4x4_msa(const int16_t *input, int16_t *output,
int32_t src_stride) {
v8i16 in0, in1, in2, in3, in4;
@@ -46,7 +46,7 @@
ST4x2_UB(in2, output + 12, 4);
}
-void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
+void av1_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
int32_t tx_type) {
v8i16 in0, in1, in2, in3;
diff --git a/av1/encoder/mips/msa/fdct8x8_msa.c b/av1/encoder/mips/msa/fdct8x8_msa.c
index 114cab5..7a2bde6 100644
--- a/av1/encoder/mips/msa/fdct8x8_msa.c
+++ b/av1/encoder/mips/msa/fdct8x8_msa.c
@@ -14,7 +14,7 @@
#include "av1/common/enums.h"
#include "av1/encoder/mips/msa/fdct_msa.h"
-void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
+void av1_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
int32_t tx_type) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
diff --git a/av1/encoder/mips/msa/fdct_msa.h b/av1/encoder/mips/msa/fdct_msa.h
index 37fe131..7f02df0 100644
--- a/av1/encoder/mips/msa/fdct_msa.h
+++ b/av1/encoder/mips/msa/fdct_msa.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
-#define VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#ifndef AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
+#define AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
#include "aom_dsp/mips/fwd_txfm_msa.h"
#include "aom_dsp/mips/txfm_macros_msa.h"
@@ -114,4 +114,4 @@
PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
out0, out1, out2, out3); \
}
-#endif // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#endif // AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
diff --git a/av1/encoder/mips/msa/temporal_filter_msa.c b/av1/encoder/mips/msa/temporal_filter_msa.c
index d690a3e..9f0c8b7 100644
--- a/av1/encoder/mips/msa/temporal_filter_msa.c
+++ b/av1/encoder/mips/msa/temporal_filter_msa.c
@@ -266,7 +266,7 @@
}
}
-void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
+void av1_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
uint8_t *frame2_ptr, uint32_t blk_w,
uint32_t blk_h, int32_t strength,
int32_t filt_wgt, uint32_t *accu,
@@ -278,7 +278,7 @@
temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
filt_wgt, accu, cnt);
} else {
- vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
+ av1_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
strength, filt_wgt, accu, cnt);
}
}
diff --git a/av1/encoder/pickdering.c b/av1/encoder/pickdering.c
index fd9d62d..05c0877 100644
--- a/av1/encoder/pickdering.c
+++ b/av1/encoder/pickdering.c
@@ -33,8 +33,8 @@
return sum/(double)(1 << 2*coeff_shift);
}
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
- VP10_COMMON *cm,
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+ AV1_COMMON *cm,
MACROBLOCKD *xd) {
int r, c;
int sbr, sbc;
@@ -58,7 +58,7 @@
src = aom_malloc(sizeof(*src)*cm->mi_rows*cm->mi_cols*64);
ref_coeff = aom_malloc(sizeof(*ref_coeff)*cm->mi_rows*cm->mi_cols*64);
bskip = aom_malloc(sizeof(*bskip)*cm->mi_rows*cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+ av1_setup_dst_planes(xd->plane, frame, 0, 0);
for (pli = 0; pli < 3; pli++) {
dec[pli] = xd->plane[pli].subsampling_x;
bsize[pli] = 8 >> dec[pli];
@@ -66,7 +66,7 @@
stride = bsize[0]*cm->mi_cols;
for (r = 0; r < bsize[0]*cm->mi_rows; ++r) {
for (c = 0; c < bsize[0]*cm->mi_cols; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
src[r * stride + c] =
CONVERT_TO_SHORTPTR(xd->plane[0].dst.buf)
@@ -78,7 +78,7 @@
src[r * stride + c] =
xd->plane[0].dst.buf[r*xd->plane[0].dst.stride + c];
ref_coeff[r * stride + c] = ref->y_buffer[r * ref->y_stride + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
}
diff --git a/av1/encoder/picklpf.c b/av1/encoder/picklpf.c
index cf3a08f..88788df 100644
--- a/av1/encoder/picklpf.c
+++ b/av1/encoder/picklpf.c
@@ -26,7 +26,7 @@
#include "av1/encoder/picklpf.h"
#include "av1/encoder/quantize.h"
-static int get_max_filter_level(const VP10_COMP *cpi) {
+static int get_max_filter_level(const AV1_COMP *cpi) {
if (cpi->oxcf.pass == 2) {
return cpi->twopass.section_intra_rating > 8 ? MAX_LOOP_FILTER * 3 / 4
: MAX_LOOP_FILTER;
@@ -36,28 +36,28 @@
}
static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
- VP10_COMP *const cpi, int filt_level,
+ AV1_COMP *const cpi, int filt_level,
int partial_frame) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
int64_t filt_err;
if (cpi->num_workers > 1)
- vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
+ av1_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
filt_level, 1, partial_frame, cpi->workers,
cpi->num_workers, &cpi->lf_row_sync);
else
- vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+ av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
1, partial_frame);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- filt_err = vp10_highbd_get_y_sse(sd, cm->frame_to_show);
+ filt_err = av1_highbd_get_y_sse(sd, cm->frame_to_show);
} else {
- filt_err = vp10_get_y_sse(sd, cm->frame_to_show);
+ filt_err = av1_get_y_sse(sd, cm->frame_to_show);
}
#else
- filt_err = vp10_get_y_sse(sd, cm->frame_to_show);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ filt_err = av1_get_y_sse(sd, cm->frame_to_show);
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Re-instate the unfiltered frame
aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
@@ -65,9 +65,9 @@
return filt_err;
}
-static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+static int search_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
int partial_frame) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const struct loopfilter *const lf = &cm->lf;
const int min_filter_level = 0;
const int max_filter_level = get_max_filter_level(cpi);
@@ -145,9 +145,9 @@
return filt_best;
}
-void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
LPF_PICK_METHOD method) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
struct loopfilter *const lf = &cm->lf;
lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
@@ -157,10 +157,10 @@
} else if (method >= LPF_PICK_FROM_Q) {
const int min_filter_level = 0;
const int max_filter_level = get_max_filter_level(cpi);
- const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+ const int q = av1_ac_quant(cm->base_qindex, 0, cm->bit_depth);
// These values were determined by linear fitting the result of the
// searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
case VPX_BITS_8:
@@ -180,7 +180,7 @@
}
#else
int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
} else {
diff --git a/av1/encoder/picklpf.h b/av1/encoder/picklpf.h
index 428c944..44c9ee5 100644
--- a/av1/encoder/picklpf.h
+++ b/av1/encoder/picklpf.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_PICKLPF_H_
-#define VP10_ENCODER_PICKLPF_H_
+#ifndef AV1_ENCODER_PICKLPF_H_
+#define AV1_ENCODER_PICKLPF_H_
#ifdef __cplusplus
extern "C" {
@@ -19,12 +19,12 @@
#include "av1/encoder/encoder.h"
struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
- struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+void av1_pick_filter_level(const struct yv12_buffer_config *sd,
+ struct AV1_COMP *cpi, LPF_PICK_METHOD method);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_PICKLPF_H_
+#endif // AV1_ENCODER_PICKLPF_H_
diff --git a/av1/encoder/quantize.c b/av1/encoder/quantize.c
index a0dd2b7..5ad2456 100644
--- a/av1/encoder/quantize.c
+++ b/av1/encoder/quantize.c
@@ -21,7 +21,7 @@
#include "av1/encoder/quantize.h"
#include "av1/encoder/rd.h"
-void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -77,8 +77,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
const int16_t *quant_ptr,
@@ -137,7 +137,7 @@
// TODO(jingning) Refactor this file and combine functions with similar
// operations.
-void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
const int16_t *quant_ptr,
@@ -199,8 +199,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_32x32_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_32x32_c(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -259,7 +259,7 @@
}
#endif
-void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
+void av1_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
const int16_t *scan, const int16_t *iscan) {
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *p = &x->plane[plane];
@@ -271,7 +271,7 @@
const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][0];
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
aom_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block,
p->zbin, p->round, p->quant, p->quant_shift,
@@ -309,8 +309,8 @@
}
static int get_qzbin_factor(int q, aom_bit_depth_t bit_depth) {
- const int quant = vp10_dc_quant(q, 0, bit_depth);
-#if CONFIG_VPX_HIGHBITDEPTH
+ const int quant = av1_dc_quant(q, 0, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
case VPX_BITS_8:
return q == 0 ? 64 : (quant < 148 ? 84 : 80);
@@ -328,8 +328,8 @@
#endif
}
-void vp10_init_quantizer(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_init_quantizer(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
QUANTS *const quants = &cpi->quants;
int i, q, quant;
@@ -342,8 +342,8 @@
if (q == 0) qrounding_factor_fp = 64;
// y
- quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
- : vp10_ac_quant(q, 0, cm->bit_depth);
+ quant = i == 0 ? av1_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
+ : av1_ac_quant(q, 0, cm->bit_depth);
invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant);
quants->y_quant_fp[q][i] = (1 << 16) / quant;
quants->y_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
@@ -352,8 +352,8 @@
cpi->y_dequant[q][i] = quant;
// uv
- quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
- : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
+ quant = i == 0 ? av1_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
+ : av1_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
quant);
quants->uv_quant_fp[q][i] = (1 << 16) / quant;
@@ -383,13 +383,13 @@
}
}
-void vp10_init_plane_quantizers(VP10_COMP *cpi, MACROBLOCK *x) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_init_plane_quantizers(AV1_COMP *cpi, MACROBLOCK *x) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
QUANTS *const quants = &cpi->quants;
const int segment_id = xd->mi[0]->mbmi.segment_id;
- const int qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
- const int rdmult = vp10_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
+ const int qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ const int rdmult = av1_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
int i;
#if CONFIG_AOM_QM
int minqm = cm->min_qmlevel;
@@ -443,15 +443,15 @@
set_error_per_bit(x, rdmult);
- vp10_initialize_me_consts(cpi, x, x->q_index);
+ av1_initialize_me_consts(cpi, x, x->q_index);
}
-void vp10_frame_init_quantizer(VP10_COMP *cpi) {
- vp10_init_plane_quantizers(cpi, &cpi->td.mb);
+void av1_frame_init_quantizer(AV1_COMP *cpi) {
+ av1_init_plane_quantizers(cpi, &cpi->td.mb);
}
-void vp10_set_quantizer(VP10_COMMON *cm, int q) {
- // quantizer has to be reinitialized with vp10_init_quantizer() if any
+void av1_set_quantizer(AV1_COMMON *cm, int q) {
+ // quantizer has to be reinitialized with av1_init_quantizer() if any
// delta_q changes.
cm->base_qindex = q;
cm->y_dc_delta_q = 0;
@@ -469,11 +469,11 @@
208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
};
-int vp10_quantizer_to_qindex(int quantizer) {
+int av1_quantizer_to_qindex(int quantizer) {
return quantizer_to_qindex[quantizer];
}
-int vp10_qindex_to_quantizer(int qindex) {
+int av1_qindex_to_quantizer(int qindex) {
int quantizer;
for (quantizer = 0; quantizer < 64; ++quantizer)
diff --git a/av1/encoder/quantize.h b/av1/encoder/quantize.h
index a3d252f..c1d5810 100644
--- a/av1/encoder/quantize.h
+++ b/av1/encoder/quantize.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_QUANTIZE_H_
-#define VP10_ENCODER_QUANTIZE_H_
+#ifndef AV1_ENCODER_QUANTIZE_H_
+#define AV1_ENCODER_QUANTIZE_H_
#include "./aom_config.h"
#include "av1/common/quant_common.h"
@@ -39,26 +39,26 @@
DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
} QUANTS;
-void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
+void av1_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
const int16_t *scan, const int16_t *iscan);
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
-void vp10_frame_init_quantizer(struct VP10_COMP *cpi);
+void av1_frame_init_quantizer(struct AV1_COMP *cpi);
-void vp10_init_plane_quantizers(struct VP10_COMP *cpi, MACROBLOCK *x);
+void av1_init_plane_quantizers(struct AV1_COMP *cpi, MACROBLOCK *x);
-void vp10_init_quantizer(struct VP10_COMP *cpi);
+void av1_init_quantizer(struct AV1_COMP *cpi);
-void vp10_set_quantizer(struct VP10Common *cm, int q);
+void av1_set_quantizer(struct AV1Common *cm, int q);
-int vp10_quantizer_to_qindex(int quantizer);
+int av1_quantizer_to_qindex(int quantizer);
-int vp10_qindex_to_quantizer(int qindex);
+int av1_qindex_to_quantizer(int qindex);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_QUANTIZE_H_
+#endif // AV1_ENCODER_QUANTIZE_H_
diff --git a/av1/encoder/ratectrl.c b/av1/encoder/ratectrl.c
index bcb5ab8..79dc74c 100644
--- a/av1/encoder/ratectrl.c
+++ b/av1/encoder/ratectrl.c
@@ -46,7 +46,7 @@
#define FRAME_OVERHEAD_BITS 200
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define ASSIGN_MINQ_TABLE(bit_depth, name) \
do { \
switch (bit_depth) { \
@@ -76,7 +76,7 @@
static int inter_minq_8[QINDEX_RANGE];
static int rtc_minq_8[QINDEX_RANGE];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int kf_low_motion_minq_10[QINDEX_RANGE];
static int kf_high_motion_minq_10[QINDEX_RANGE];
static int arfgf_low_motion_minq_10[QINDEX_RANGE];
@@ -110,7 +110,7 @@
if (minqtarget <= 2.0) return 0;
for (i = 0; i < QINDEX_RANGE; i++) {
- if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
+ if (minqtarget <= av1_convert_qindex_to_q(i, bit_depth)) return i;
}
return QINDEX_RANGE - 1;
@@ -121,7 +121,7 @@
aom_bit_depth_t bit_depth) {
int i;
for (i = 0; i < QINDEX_RANGE; i++) {
- const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
+ const double maxq = av1_convert_qindex_to_q(i, bit_depth);
kf_low_m[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.150, bit_depth);
kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth);
arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth);
@@ -131,11 +131,11 @@
}
}
-void vp10_rc_init_minq_luts(void) {
+void av1_rc_init_minq_luts(void) {
init_minq_luts(kf_low_motion_minq_8, kf_high_motion_minq_8,
arfgf_low_motion_minq_8, arfgf_high_motion_minq_8,
inter_minq_8, rtc_minq_8, VPX_BITS_8);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
init_minq_luts(kf_low_motion_minq_10, kf_high_motion_minq_10,
arfgf_low_motion_minq_10, arfgf_high_motion_minq_10,
inter_minq_10, rtc_minq_10, VPX_BITS_10);
@@ -148,25 +148,25 @@
// These functions use formulaic calculations to make playing with the
// quantizer tables easier. If necessary they can be replaced by lookup
// tables if and when things settle down in the experimental bitstream
-double vp10_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth) {
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth) {
// Convert the index to a real Q value (scaled down to match old Q values)
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
- case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
- case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+ case VPX_BITS_8: return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
+ case VPX_BITS_10: return av1_ac_quant(qindex, 0, bit_depth) / 16.0;
+ case VPX_BITS_12: return av1_ac_quant(qindex, 0, bit_depth) / 64.0;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1.0;
}
#else
- return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+ return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
#endif
}
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
double correction_factor, aom_bit_depth_t bit_depth) {
- const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
+ const double q = av1_convert_qindex_to_q(qindex, bit_depth);
int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
assert(correction_factor <= MAX_BPB_FACTOR &&
@@ -177,18 +177,18 @@
return (int)(enumerator * correction_factor / q);
}
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
+int av1_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
double correction_factor,
aom_bit_depth_t bit_depth) {
const int bpm =
- (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
+ (int)(av1_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
return VPXMAX(FRAME_OVERHEAD_BITS,
(int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
}
-int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_pframe_target_size(const AV1_COMP *const cpi, int target) {
const RATE_CONTROL *rc = &cpi->rc;
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
const int min_frame_target =
VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
if (target < min_frame_target) target = min_frame_target;
@@ -209,9 +209,9 @@
return target;
}
-int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_iframe_target_size(const AV1_COMP *const cpi, int target) {
const RATE_CONTROL *rc = &cpi->rc;
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
if (oxcf->rc_max_intra_bitrate_pct) {
const int max_rate =
rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
@@ -222,8 +222,8 @@
}
// Update the buffer level: leaky bucket model.
-static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
- const VP10_COMMON *const cm = &cpi->common;
+static void update_buffer_level(AV1_COMP *cpi, int encoded_frame_size) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
// Non-viewable frames are a special case and are treated as pure overhead.
@@ -238,7 +238,7 @@
rc->buffer_level = rc->bits_off_target;
}
-int vp10_rc_get_default_min_gf_interval(int width, int height,
+int av1_rc_get_default_min_gf_interval(int width, int height,
double framerate) {
// Assume we do not need any constraint lower than 4K 20 fps
static const double factor_safe = 3840 * 2160 * 20.0;
@@ -257,13 +257,13 @@
// 4K60: 12
}
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
+int av1_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
interval += (interval & 0x01); // Round to even value
return VPXMAX(interval, min_gf_interval);
}
-void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
+void av1_rc_init(const AV1EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
int i;
if (pass == 0 && oxcf->rc_mode == VPX_CBR) {
@@ -303,7 +303,7 @@
rc->ni_frames = 0;
rc->tot_q = 0.0;
- rc->avg_q = vp10_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
+ rc->avg_q = av1_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
for (i = 0; i < RATE_FACTOR_LEVELS; ++i) {
rc->rate_correction_factors[i] = 1.0;
@@ -312,16 +312,16 @@
rc->min_gf_interval = oxcf->min_gf_interval;
rc->max_gf_interval = oxcf->max_gf_interval;
if (rc->min_gf_interval == 0)
- rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+ rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
oxcf->width, oxcf->height, oxcf->init_framerate);
if (rc->max_gf_interval == 0)
- rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+ rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
oxcf->init_framerate, rc->min_gf_interval);
rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
}
-int vp10_rc_drop_frame(VP10_COMP *cpi) {
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+int av1_rc_drop_frame(AV1_COMP *cpi) {
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
if (!oxcf->drop_frames_water_mark) {
@@ -356,7 +356,7 @@
}
}
-static double get_rate_correction_factor(const VP10_COMP *cpi) {
+static double get_rate_correction_factor(const AV1_COMP *cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
double rcf;
@@ -378,7 +378,7 @@
return fclamp(rcf, MIN_BPB_FACTOR, MAX_BPB_FACTOR);
}
-static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
+static void set_rate_correction_factor(AV1_COMP *cpi, double factor) {
RATE_CONTROL *const rc = &cpi->rc;
// Normalize RCF to account for the size-dependent scaling factor.
@@ -402,8 +402,8 @@
}
}
-void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_update_rate_correction_factors(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
int correction_factor = 100;
double rate_correction_factor = get_rate_correction_factor(cpi);
double adjustment_limit;
@@ -421,10 +421,10 @@
// Stay in double to avoid int overflow when values are large
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->common.seg.enabled) {
projected_size_based_on_q =
- vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
+ av1_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
} else {
projected_size_based_on_q =
- vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
+ av1_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
cm->MBs, rate_correction_factor, cm->bit_depth);
}
// Work out a size correction factor.
@@ -469,9 +469,9 @@
set_rate_correction_factor(cpi, rate_correction_factor);
}
-int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
+int av1_rc_regulate_q(const AV1_COMP *cpi, int target_bits_per_frame,
int active_best_quality, int active_worst_quality) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
int q = active_worst_quality;
int last_error = INT_MAX;
int i, target_bits_per_mb, bits_per_mb_at_this_q;
@@ -487,9 +487,9 @@
do {
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
bits_per_mb_at_this_q =
- (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
+ (int)av1_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
} else {
- bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+ bits_per_mb_at_this_q = (int)av1_rc_bits_per_mb(
cm->frame_type, i, correction_factor, cm->bit_depth);
}
@@ -551,7 +551,7 @@
arfgf_low_motion_minq, arfgf_high_motion_minq);
}
-static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_vbr(const AV1_COMP *cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
const unsigned int curr_frame = cpi->common.current_video_frame;
int active_worst_quality;
@@ -573,13 +573,13 @@
}
// Adjust active_worst_quality level based on buffer level.
-static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_cbr(const AV1_COMP *cpi) {
// Adjust active_worst_quality: If buffer is above the optimal/target level,
// bring active_worst_quality down depending on fullness of buffer.
// If buffer is below the optimal level, let the active_worst_quality go from
// ambient Q (at buffer = optimal level) to worst_quality level
// (at buffer = critical level).
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *rc = &cpi->rc;
// Buffer level below which we push active_worst to worst_quality.
int64_t critical_level = rc->optimal_buffer_level >> 3;
@@ -628,10 +628,10 @@
return active_worst_quality;
}
-static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_cbr(const AV1_COMP *cpi,
int *bottom_index,
int *top_index) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
int active_best_quality;
int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
@@ -646,8 +646,8 @@
// based on the ambient Q to reduce the risk of popping.
if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
- double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(
+ double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ int delta_qindex = av1_compute_qdelta(
rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else if (cm->current_video_frame > 0) {
@@ -665,9 +665,9 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
- q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
active_best_quality +=
- vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -711,7 +711,7 @@
!(cm->current_video_frame == 0)) {
int qdelta = 0;
aom_clear_system_state();
- qdelta = vp10_compute_qdelta_by_rate(
+ qdelta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
*top_index = active_worst_quality + qdelta;
*top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
@@ -722,7 +722,7 @@
if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
@@ -740,7 +740,7 @@
}
static int get_active_cq_level(const RATE_CONTROL *rc,
- const VP10EncoderConfig *const oxcf) {
+ const AV1EncoderConfig *const oxcf) {
static const double cq_adjust_threshold = 0.1;
int active_cq_level = oxcf->cq_level;
if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
@@ -752,12 +752,12 @@
return active_cq_level;
}
-static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_vbr(const AV1_COMP *cpi,
int *bottom_index,
int *top_index) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const int cq_level = get_active_cq_level(rc, oxcf);
int active_best_quality;
int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi);
@@ -768,13 +768,13 @@
if (frame_is_intra_only(cm)) {
if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
- double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
+ double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ int delta_qindex = av1_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
- double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(
+ double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ int delta_qindex = av1_compute_qdelta(
rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else {
@@ -792,9 +792,9 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
- q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
active_best_quality +=
- vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -818,12 +818,12 @@
} else if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
- double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+ double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
int delta_qindex;
if (cpi->refresh_alt_ref_frame)
- delta_qindex = vp10_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
else
- delta_qindex = vp10_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else {
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -831,10 +831,10 @@
} else {
if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
- double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+ double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
0.70, 1.0, 0.85, 1.0 };
- int delta_qindex = vp10_compute_qdelta(
+ int delta_qindex = av1_compute_qdelta(
rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
@@ -869,11 +869,11 @@
// Limit Q range for the adaptive loop.
if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
- qdelta = vp10_compute_qdelta_by_rate(
+ qdelta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
- qdelta = vp10_compute_qdelta_by_rate(
+ qdelta = av1_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
}
*top_index = active_worst_quality + qdelta;
@@ -887,7 +887,7 @@
} else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
@@ -905,7 +905,7 @@
return q;
}
-int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
+int av1_frame_type_qdelta(const AV1_COMP *cpi, int rf_level, int q) {
static const double rate_factor_deltas[RATE_FACTOR_LEVELS] = {
1.00, // INTER_NORMAL
1.00, // INTER_HIGH
@@ -916,19 +916,19 @@
static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = {
INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME
};
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
int qdelta =
- vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+ av1_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
rate_factor_deltas[rf_level], cm->bit_depth);
return qdelta;
}
#define STATIC_MOTION_THRESH 95
-static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_two_pass(const AV1_COMP *cpi,
int *bottom_index, int *top_index) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const GF_GROUP *gf_group = &cpi->twopass.gf_group;
const int cq_level = get_active_cq_level(rc, oxcf);
int active_best_quality;
@@ -949,15 +949,15 @@
if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
active_best_quality = qindex;
- last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp10_compute_qdelta(
+ last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(
rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
active_worst_quality =
VPXMIN(qindex + delta_qindex, active_worst_quality);
} else {
qindex = rc->last_boosted_qindex;
- last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp10_compute_qdelta(
+ last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+ delta_qindex = av1_compute_qdelta(
rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
}
@@ -979,9 +979,9 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
- q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
active_best_quality +=
- vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1053,7 +1053,7 @@
// Static forced key frames Q restrictions dealt with elsewhere.
if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
(cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
- int qdelta = vp10_frame_type_qdelta(
+ int qdelta = av1_frame_type_qdelta(
cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
active_worst_quality =
VPXMAX(active_worst_quality + qdelta, active_best_quality);
@@ -1062,7 +1062,7 @@
// Modify active_best_quality for downscaled normal frames.
if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
- int qdelta = vp10_compute_qdelta_by_rate(
+ int qdelta = av1_compute_qdelta_by_rate(
rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
active_best_quality =
VPXMAX(active_best_quality + qdelta, rc->best_quality);
@@ -1084,7 +1084,7 @@
q = rc->last_boosted_qindex;
}
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
active_worst_quality);
if (q > active_worst_quality) {
// Special case when we are targeting the max allowed rate.
@@ -1106,7 +1106,7 @@
return q;
}
-int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
+int av1_rc_pick_q_and_bounds(const AV1_COMP *cpi, int *bottom_index,
int *top_index) {
int q;
if (cpi->oxcf.pass == 0) {
@@ -1121,7 +1121,7 @@
return q;
}
-void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
+void av1_rc_compute_frame_size_bounds(const AV1_COMP *cpi, int frame_target,
int *frame_under_shoot_limit,
int *frame_over_shoot_limit) {
if (cpi->oxcf.rc_mode == VPX_Q) {
@@ -1137,8 +1137,8 @@
}
}
-void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
- const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_set_frame_target(AV1_COMP *cpi, int target) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
rc->this_frame_target = target;
@@ -1154,7 +1154,7 @@
((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
}
-static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
+static void update_alt_ref_frame_stats(AV1_COMP *cpi) {
// this frame refreshes means next frames don't unless specified by user
RATE_CONTROL *const rc = &cpi->rc;
rc->frames_since_golden = 0;
@@ -1166,7 +1166,7 @@
rc->source_alt_ref_active = 1;
}
-static void update_golden_frame_stats(VP10_COMP *cpi) {
+static void update_golden_frame_stats(AV1_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
// Update the Golden frame usage counts.
@@ -1195,21 +1195,21 @@
}
}
-void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
- const VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_postencode_update(AV1_COMP *cpi, uint64_t bytes_used) {
+ const AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
const int qindex = cm->base_qindex;
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
- vp10_cyclic_refresh_postencode(cpi);
+ av1_cyclic_refresh_postencode(cpi);
}
// Update rate control heuristics
rc->projected_frame_size = (int)(bytes_used << 3);
// Post encode loop adjustment of Q prediction.
- vp10_rc_update_rate_correction_factors(cpi);
+ av1_rc_update_rate_correction_factors(cpi);
// Keep a record of last Q and ambient average Q.
if (cm->frame_type == KEY_FRAME) {
@@ -1223,7 +1223,7 @@
rc->avg_frame_qindex[INTER_FRAME] =
ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
rc->ni_frames++;
- rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+ rc->tot_q += av1_convert_qindex_to_q(qindex, cm->bit_depth);
rc->avg_q = rc->tot_q / rc->ni_frames;
// Calculate the average Q for normal inter frames (not key or GFU
// frames).
@@ -1288,7 +1288,7 @@
}
}
-void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
+void av1_rc_postencode_update_drop_frame(AV1_COMP *cpi) {
// Update buffer level with zero size, update frame counters, and return.
update_buffer_level(cpi, 0);
cpi->rc.frames_since_key++;
@@ -1300,7 +1300,7 @@
// Use this macro to turn on/off use of alt-refs in one-pass mode.
#define USE_ALTREF_FOR_ONE_PASS 1
-static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_pframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
static const int af_ratio = 10;
const RATE_CONTROL *const rc = &cpi->rc;
int target;
@@ -1315,18 +1315,18 @@
#else
target = rc->avg_frame_bandwidth;
#endif
- return vp10_rc_clamp_pframe_target_size(cpi, target);
+ return av1_rc_clamp_pframe_target_size(cpi, target);
}
-static int calc_iframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_iframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
static const int kf_ratio = 25;
const RATE_CONTROL *rc = &cpi->rc;
const int target = rc->avg_frame_bandwidth * kf_ratio;
- return vp10_rc_clamp_iframe_target_size(cpi, target);
+ return av1_rc_clamp_iframe_target_size(cpi, target);
}
-void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_vbr_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1360,11 +1360,11 @@
target = calc_iframe_target_size_one_pass_vbr(cpi);
else
target = calc_pframe_target_size_one_pass_vbr(cpi);
- vp10_rc_set_frame_target(cpi, target);
+ av1_rc_set_frame_target(cpi, target);
}
-static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
- const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static int calc_pframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
const RATE_CONTROL *rc = &cpi->rc;
const int64_t diff = rc->optimal_buffer_level - rc->buffer_level;
const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100;
@@ -1402,7 +1402,7 @@
return VPXMAX(min_frame_target, target);
}
-static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_iframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
const RATE_CONTROL *rc = &cpi->rc;
int target;
if (cpi->common.current_video_frame == 0) {
@@ -1419,11 +1419,11 @@
}
target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
}
- return vp10_rc_clamp_iframe_target_size(cpi, target);
+ return av1_rc_clamp_iframe_target_size(cpi, target);
}
-void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_cbr_params(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1440,7 +1440,7 @@
}
if (rc->frames_till_gf_update_due == 0) {
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
- vp10_cyclic_refresh_set_golden_update(cpi);
+ av1_cyclic_refresh_set_golden_update(cpi);
else
rc->baseline_gf_interval =
(rc->min_gf_interval + rc->max_gf_interval) / 2;
@@ -1455,21 +1455,21 @@
// Any update/change of global cyclic refresh parameters (amount/delta-qp)
// should be done here, before the frame qp is selected.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
- vp10_cyclic_refresh_update_parameters(cpi);
+ av1_cyclic_refresh_update_parameters(cpi);
if (cm->frame_type == KEY_FRAME)
target = calc_iframe_target_size_one_pass_cbr(cpi);
else
target = calc_pframe_target_size_one_pass_cbr(cpi);
- vp10_rc_set_frame_target(cpi, target);
+ av1_rc_set_frame_target(cpi, target);
if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC)
- cpi->resize_pending = vp10_resize_one_pass_cbr(cpi);
+ cpi->resize_pending = av1_resize_one_pass_cbr(cpi);
else
cpi->resize_pending = 0;
}
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
aom_bit_depth_t bit_depth) {
int start_index = rc->worst_quality;
int target_index = rc->worst_quality;
@@ -1478,19 +1478,19 @@
// Convert the average q value to an index.
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
start_index = i;
- if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
+ if (av1_convert_qindex_to_q(i, bit_depth) >= qstart) break;
}
// Convert the q target to an index
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
target_index = i;
- if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
+ if (av1_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
}
return target_index - start_index;
}
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
int qindex, double rate_target_ratio,
aom_bit_depth_t bit_depth) {
int target_index = rc->worst_quality;
@@ -1498,14 +1498,14 @@
// Look up the current projected bits per block for the base index
const int base_bits_per_mb =
- vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
+ av1_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
// Find the target bits per mb based on the base value and given ratio.
const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
// Convert the q target to an index
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
- if (vp10_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
+ if (av1_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
target_bits_per_mb) {
target_index = i;
break;
@@ -1514,9 +1514,9 @@
return target_index - qindex;
}
-void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
+void av1_rc_set_gf_interval_range(const AV1_COMP *const cpi,
RATE_CONTROL *const rc) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
// Special case code for 1 pass fixed Q mode tests
if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
@@ -1528,10 +1528,10 @@
rc->max_gf_interval = oxcf->max_gf_interval;
rc->min_gf_interval = oxcf->min_gf_interval;
if (rc->min_gf_interval == 0)
- rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+ rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
oxcf->width, oxcf->height, cpi->framerate);
if (rc->max_gf_interval == 0)
- rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+ rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
cpi->framerate, rc->min_gf_interval);
// Extended interval for genuinely static scenes
@@ -1550,9 +1550,9 @@
}
}
-void vp10_rc_update_framerate(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_update_framerate(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
int vbr_max_bits;
@@ -1576,12 +1576,12 @@
rc->max_frame_bandwidth =
VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
- vp10_rc_set_gf_interval_range(cpi, rc);
+ av1_rc_set_gf_interval_range(cpi, rc);
}
#define VBR_PCT_ADJUSTMENT_LIMIT 50
// For VBR...adjustment to the frame target based on error from previous frames
-static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
+static void vbr_rate_correction(AV1_COMP *cpi, int *this_frame_target) {
RATE_CONTROL *const rc = &cpi->rc;
int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
int max_delta;
@@ -1623,20 +1623,20 @@
}
}
-void vp10_set_target_rate(VP10_COMP *cpi) {
+void av1_set_target_rate(AV1_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
int target_rate = rc->base_frame_target;
// Correction to rate target based on prior over or under shoot.
if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ)
vbr_rate_correction(cpi, &target_rate);
- vp10_rc_set_frame_target(cpi, target_rate);
+ av1_rc_set_frame_target(cpi, target_rate);
}
// Check if we should resize, based on average QP from past x frames.
// Only allow for resize at most one scale down for now, scaling factor is 2.
-int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
- const VP10_COMMON *const cm = &cpi->common;
+int av1_resize_one_pass_cbr(AV1_COMP *cpi) {
+ const AV1_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int resize_now = 0;
cpi->resize_scale_num = 1;
@@ -1695,14 +1695,14 @@
rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi);
// Reset cyclic refresh parameters.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
- vp10_cyclic_refresh_reset_resize(cpi);
+ av1_cyclic_refresh_reset_resize(cpi);
// Get the projected qindex, based on the scaled target frame size (scaled
- // so target_bits_per_mb in vp10_rc_regulate_q will be correct target).
+ // so target_bits_per_mb in av1_rc_regulate_q will be correct target).
target_bits_per_frame = (resize_now == 1)
? rc->this_frame_target * tot_scale_change
: rc->this_frame_target / tot_scale_change;
active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
- qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+ qindex = av1_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
active_worst_quality);
// If resize is down, check if projected q index is close to worst_quality,
// and if so, reduce the rate correction factor (since likely can afford
diff --git a/av1/encoder/ratectrl.h b/av1/encoder/ratectrl.h
index 410cebf..3869312 100644
--- a/av1/encoder/ratectrl.h
+++ b/av1/encoder/ratectrl.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_RATECTRL_H_
-#define VP10_ENCODER_RATECTRL_H_
+#ifndef AV1_ENCODER_RATECTRL_H_
+#define AV1_ENCODER_RATECTRL_H_
#include "aom/aom_codec.h"
#include "aom/aom_integer.h"
@@ -145,118 +145,118 @@
int rf_level_maxq[RATE_FACTOR_LEVELS];
} RATE_CONTROL;
-struct VP10_COMP;
-struct VP10EncoderConfig;
+struct AV1_COMP;
+struct AV1EncoderConfig;
-void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
+void av1_rc_init(const struct AV1EncoderConfig *oxcf, int pass,
RATE_CONTROL *rc);
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
+int av1_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
double correction_factor,
aom_bit_depth_t bit_depth);
-double vp10_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth);
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth);
-void vp10_rc_init_minq_luts(void);
+void av1_rc_init_minq_luts(void);
-int vp10_rc_get_default_min_gf_interval(int width, int height,
+int av1_rc_get_default_min_gf_interval(int width, int height,
double framerate);
-// Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
+// Note av1_rc_get_default_max_gf_interval() requires the min_gf_interval to
// be passed in to ensure that the max_gf_interval returned is at least as bis
// as that.
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
+int av1_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
// Generally at the high level, the following flow is expected
// to be enforced for rate control:
// First call per frame, one of:
-// vp10_rc_get_one_pass_vbr_params()
-// vp10_rc_get_one_pass_cbr_params()
-// vp10_rc_get_first_pass_params()
-// vp10_rc_get_second_pass_params()
+// av1_rc_get_one_pass_vbr_params()
+// av1_rc_get_one_pass_cbr_params()
+// av1_rc_get_first_pass_params()
+// av1_rc_get_second_pass_params()
// depending on the usage to set the rate control encode parameters desired.
//
// Then, call encode_frame_to_data_rate() to perform the
// actual encode. This function will in turn call encode_frame()
// one or more times, followed by one of:
-// vp10_rc_postencode_update()
-// vp10_rc_postencode_update_drop_frame()
+// av1_rc_postencode_update()
+// av1_rc_postencode_update_drop_frame()
//
// The majority of rate control parameters are only expected
-// to be set in the vp10_rc_get_..._params() functions and
-// updated during the vp10_rc_postencode_update...() functions.
-// The only exceptions are vp10_rc_drop_frame() and
-// vp10_rc_update_rate_correction_factors() functions.
+// to be set in the av1_rc_get_..._params() functions and
+// updated during the av1_rc_postencode_update...() functions.
+// The only exceptions are av1_rc_drop_frame() and
+// av1_rc_update_rate_correction_factors() functions.
// Functions to set parameters for encoding before the actual
// encode_frame_to_data_rate() function.
-void vp10_rc_get_one_pass_vbr_params(struct VP10_COMP *cpi);
-void vp10_rc_get_one_pass_cbr_params(struct VP10_COMP *cpi);
+void av1_rc_get_one_pass_vbr_params(struct AV1_COMP *cpi);
+void av1_rc_get_one_pass_cbr_params(struct AV1_COMP *cpi);
// Post encode update of the rate control parameters based
// on bytes used
-void vp10_rc_postencode_update(struct VP10_COMP *cpi, uint64_t bytes_used);
+void av1_rc_postencode_update(struct AV1_COMP *cpi, uint64_t bytes_used);
// Post encode update of the rate control parameters for dropped frames
-void vp10_rc_postencode_update_drop_frame(struct VP10_COMP *cpi);
+void av1_rc_postencode_update_drop_frame(struct AV1_COMP *cpi);
// Updates rate correction factors
// Changes only the rate correction factors in the rate control structure.
-void vp10_rc_update_rate_correction_factors(struct VP10_COMP *cpi);
+void av1_rc_update_rate_correction_factors(struct AV1_COMP *cpi);
// Decide if we should drop this frame: For 1-pass CBR.
// Changes only the decimation count in the rate control structure
-int vp10_rc_drop_frame(struct VP10_COMP *cpi);
+int av1_rc_drop_frame(struct AV1_COMP *cpi);
// Computes frame size bounds.
-void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
+void av1_rc_compute_frame_size_bounds(const struct AV1_COMP *cpi,
int this_frame_target,
int *frame_under_shoot_limit,
int *frame_over_shoot_limit);
// Picks q and q bounds given the target for bits
-int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
+int av1_rc_pick_q_and_bounds(const struct AV1_COMP *cpi, int *bottom_index,
int *top_index);
// Estimates q to achieve a target bits per frame
-int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
+int av1_rc_regulate_q(const struct AV1_COMP *cpi, int target_bits_per_frame,
int active_best_quality, int active_worst_quality);
// Estimates bits per mb for a given qindex and correction factor.
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
double correction_factor, aom_bit_depth_t bit_depth);
// Clamping utilities for bitrate targets for iframes and pframes.
-int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
+int av1_rc_clamp_iframe_target_size(const struct AV1_COMP *const cpi,
int target);
-int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
+int av1_rc_clamp_pframe_target_size(const struct AV1_COMP *const cpi,
int target);
// Utility to set frame_target into the RATE_CONTROL structure
-// This function is called only from the vp10_rc_get_..._params() functions.
-void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
+// This function is called only from the av1_rc_get_..._params() functions.
+void av1_rc_set_frame_target(struct AV1_COMP *cpi, int target);
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a target q value
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
aom_bit_depth_t bit_depth);
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a value that should equate to the given rate ratio.
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
int qindex, double rate_target_ratio,
aom_bit_depth_t bit_depth);
-int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
+int av1_frame_type_qdelta(const struct AV1_COMP *cpi, int rf_level, int q);
-void vp10_rc_update_framerate(struct VP10_COMP *cpi);
+void av1_rc_update_framerate(struct AV1_COMP *cpi);
-void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
+void av1_rc_set_gf_interval_range(const struct AV1_COMP *const cpi,
RATE_CONTROL *const rc);
-void vp10_set_target_rate(struct VP10_COMP *cpi);
+void av1_set_target_rate(struct AV1_COMP *cpi);
-int vp10_resize_one_pass_cbr(struct VP10_COMP *cpi);
+int av1_resize_one_pass_cbr(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RATECTRL_H_
+#endif // AV1_ENCODER_RATECTRL_H_
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 0d12f2d..8b6d521 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -46,13 +46,13 @@
// Factor to weigh the rate for switchable interp filters.
#define SWITCHABLE_INTERP_RATE_FACTOR 1
-void vp10_rd_cost_reset(RD_COST *rd_cost) {
+void av1_rd_cost_reset(RD_COST *rd_cost) {
rd_cost->rate = INT_MAX;
rd_cost->dist = INT64_MAX;
rd_cost->rdcost = INT64_MAX;
}
-void vp10_rd_cost_init(RD_COST *rd_cost) {
+void av1_rd_cost_init(RD_COST *rd_cost) {
rd_cost->rate = 0;
rd_cost->dist = 0;
rd_cost->rdcost = 0;
@@ -66,38 +66,38 @@
2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32
};
-static void fill_mode_costs(VP10_COMP *cpi) {
+static void fill_mode_costs(AV1_COMP *cpi) {
const FRAME_CONTEXT *const fc = cpi->common.fc;
int i, j;
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j)
- vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
- vp10_intra_mode_tree);
+ av1_cost_tokens(cpi->y_mode_costs[i][j], av1_kf_y_mode_prob[i][j],
+ av1_intra_mode_tree);
- vp10_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], vp10_intra_mode_tree);
+ av1_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], av1_intra_mode_tree);
for (i = 0; i < INTRA_MODES; ++i)
- vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
- vp10_intra_mode_tree);
+ av1_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+ av1_intra_mode_tree);
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- vp10_cost_tokens(cpi->switchable_interp_costs[i],
+ av1_cost_tokens(cpi->switchable_interp_costs[i],
fc->switchable_interp_prob[i],
- vp10_switchable_interp_tree);
+ av1_switchable_interp_tree);
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
- fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
+ av1_cost_tokens(cpi->intra_tx_type_costs[i][j],
+ fc->intra_ext_tx_prob[i][j], av1_ext_tx_tree);
}
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
- vp10_ext_tx_tree);
+ av1_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
+ av1_ext_tx_tree);
}
}
-static void fill_token_costs(vp10_coeff_cost *c,
- vp10_coeff_probs_model (*p)[PLANE_TYPES]) {
+static void fill_token_costs(av1_coeff_cost *c,
+ av1_coeff_probs_model (*p)[PLANE_TYPES]) {
int i, j, k, l;
TX_SIZE t;
for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -106,10 +106,10 @@
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
aom_prob probs[ENTROPY_NODES];
- vp10_model_to_full_probs(p[t][i][j][k][l], probs);
- vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
- vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
- vp10_coef_tree);
+ av1_model_to_full_probs(p[t][i][j][k][l], probs);
+ av1_cost_tokens((int *)c[t][i][j][k][0][l], probs, av1_coef_tree);
+ av1_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+ av1_coef_tree);
assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
c[t][i][j][k][1][l][EOB_TOKEN]);
}
@@ -119,7 +119,7 @@
static int sad_per_bit16lut_8[QINDEX_RANGE];
static int sad_per_bit4lut_8[QINDEX_RANGE];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int sad_per_bit16lut_10[QINDEX_RANGE];
static int sad_per_bit4lut_10[QINDEX_RANGE];
static int sad_per_bit16lut_12[QINDEX_RANGE];
@@ -133,16 +133,16 @@
// This is to make it easier to resolve the impact of experimental changes
// to the quantizer tables.
for (i = 0; i < range; i++) {
- const double q = vp10_convert_qindex_to_q(i, bit_depth);
+ const double q = av1_convert_qindex_to_q(i, bit_depth);
bit16lut[i] = (int)(0.0418 * q + 2.4107);
bit4lut[i] = (int)(0.063 * q + 2.742);
}
}
-void vp10_init_me_luts(void) {
+void av1_init_me_luts(void) {
init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE,
VPX_BITS_8);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE,
VPX_BITS_10);
init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE,
@@ -155,9 +155,9 @@
static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
128, 144 };
-int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
- const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
-#if CONFIG_VPX_HIGHBITDEPTH
+int av1_compute_rd_mult(const AV1_COMP *cpi, int qindex) {
+ const int64_t q = av1_dc_quant(qindex, 0, cpi->common.bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
int64_t rdmult = 0;
switch (cpi->common.bit_depth) {
case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
@@ -169,7 +169,7 @@
}
#else
int64_t rdmult = 88 * q * q / 24;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
@@ -184,25 +184,25 @@
static int compute_rd_thresh_factor(int qindex, aom_bit_depth_t bit_depth) {
double q;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
- case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
- case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
+ case VPX_BITS_8: q = av1_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
+ case VPX_BITS_10: q = av1_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
+ case VPX_BITS_12: q = av1_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
}
#else
(void)bit_depth;
- q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ q = av1_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
+#endif // CONFIG_AOM_HIGHBITDEPTH
// TODO(debargha): Adjust the function below.
return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
}
-void vp10_initialize_me_consts(VP10_COMP *cpi, MACROBLOCK *x, int qindex) {
-#if CONFIG_VPX_HIGHBITDEPTH
+void av1_initialize_me_consts(AV1_COMP *cpi, MACROBLOCK *x, int qindex) {
+#if CONFIG_AOM_HIGHBITDEPTH
switch (cpi->common.bit_depth) {
case VPX_BITS_8:
x->sadperbit16 = sad_per_bit16lut_8[qindex];
@@ -223,15 +223,15 @@
(void)cpi;
x->sadperbit16 = sad_per_bit16lut_8[qindex];
x->sadperbit4 = sad_per_bit4lut_8[qindex];
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
-static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
+static void set_block_thresholds(const AV1_COMMON *cm, RD_OPT *rd) {
int i, bsize, segment_id;
for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
const int qindex =
- clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
+ clamp(av1_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
cm->y_dc_delta_q,
0, MAXQ);
const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
@@ -258,8 +258,8 @@
}
}
-void vp10_initialize_rd_consts(VP10_COMP *cpi) {
- VP10_COMMON *const cm = &cpi->common;
+void av1_initialize_rd_consts(AV1_COMP *cpi) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
RD_OPT *const rd = &cpi->rd;
int i;
@@ -267,7 +267,7 @@
aom_clear_system_state();
rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128).
- rd->RDMULT = vp10_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
+ rd->RDMULT = av1_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
set_error_per_bit(x, rd->RDMULT);
@@ -283,21 +283,21 @@
if (cpi->sf.partition_search_type != VAR_BASED_PARTITION ||
cm->frame_type == KEY_FRAME) {
for (i = 0; i < PARTITION_CONTEXTS; ++i)
- vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
- vp10_partition_tree);
+ av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+ av1_partition_tree);
}
fill_mode_costs(cpi);
if (!frame_is_intra_only(cm)) {
- vp10_build_nmv_cost_table(
+ av1_build_nmv_cost_table(
x->nmvjointcost,
cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
cm->allow_high_precision_mv);
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
- cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+ av1_cost_tokens((int *)cpi->inter_mode_cost[i],
+ cm->fc->inter_mode_probs[i], av1_inter_mode_tree);
}
}
@@ -367,7 +367,7 @@
*d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
}
-void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
+void av1_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
unsigned int qstep, int *rate,
int64_t *dist) {
// This function models the rate and distortion for a Laplacian
@@ -391,7 +391,7 @@
}
}
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
const struct macroblockd_plane *pd,
ENTROPY_CONTEXT t_above[16],
ENTROPY_CONTEXT t_left[16]) {
@@ -429,7 +429,7 @@
}
}
-void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+void av1_mv_pred(AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
int i;
int zero_seen = 0;
@@ -482,7 +482,7 @@
x->pred_mv_sad[ref_frame] = best_sad;
}
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
+void av1_setup_pred_block(const MACROBLOCKD *xd,
struct buf_2d dst[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src, int mi_row,
int mi_col, const struct scale_factors *scale,
@@ -502,7 +502,7 @@
}
}
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
int stride) {
const int bw = b_width_log2_lookup[plane_bsize];
const int y = 4 * (raster_block >> bw);
@@ -510,15 +510,15 @@
return y * stride + x;
}
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
int raster_block, int16_t *base) {
const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
+ return base + av1_raster_block_offset(plane_bsize, raster_block, stride);
}
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const AV1_COMP *cpi,
int ref_frame) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
@@ -526,15 +526,15 @@
: NULL;
}
-int vp10_get_switchable_rate(const VP10_COMP *cpi,
+int av1_get_switchable_rate(const AV1_COMP *cpi,
const MACROBLOCKD *const xd) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
return SWITCHABLE_INTERP_RATE_FACTOR *
cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
}
-void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds(AV1_COMP *cpi) {
int i;
RD_OPT *const rd = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
@@ -588,7 +588,7 @@
rd->thresh_mult[THR_D63_PRED] += 2500;
}
-void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds_sub8x8(AV1_COMP *cpi) {
static const int thresh_mult[2][MAX_REFS] = {
{ 2500, 2500, 2500, 4500, 4500, 2500 },
{ 2000, 2000, 2000, 4000, 4000, 2000 }
@@ -598,7 +598,7 @@
memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
}
-void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
+void av1_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
int bsize, int best_mode_index) {
if (rd_thresh > 0) {
const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
@@ -619,10 +619,10 @@
}
}
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
aom_bit_depth_t bit_depth) {
- const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
-#if CONFIG_VPX_HIGHBITDEPTH
+ const int q = av1_dc_quant(qindex, qdelta, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth) {
case VPX_BITS_8: return 20 * q;
case VPX_BITS_10: return 5 * q;
@@ -633,5 +633,5 @@
}
#else
return 20 * q;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 9d253c3..3f1cc98 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_RD_H_
-#define VP10_ENCODER_RD_H_
+#ifndef AV1_ENCODER_RD_H_
+#define AV1_ENCODER_RD_H_
#include <limits.h>
@@ -43,7 +43,7 @@
#define RD_THRESH_INC 1
// This enumerator type needs to be kept aligned with the mode order in
-// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
+// const MODE_DEFINITION av1_mode_order[MAX_MODES] used in the rd code.
typedef enum {
THR_NEARESTMV,
THR_NEARESTA,
@@ -120,50 +120,50 @@
} RD_COST;
// Reset the rate distortion cost values to maximum (invalid) value.
-void vp10_rd_cost_reset(RD_COST *rd_cost);
+void av1_rd_cost_reset(RD_COST *rd_cost);
// Initialize the rate distortion cost values to zero.
-void vp10_rd_cost_init(RD_COST *rd_cost);
+void av1_rd_cost_init(RD_COST *rd_cost);
struct TileInfo;
struct TileDataEnc;
-struct VP10_COMP;
+struct AV1_COMP;
struct macroblock;
-int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex);
+int av1_compute_rd_mult(const struct AV1_COMP *cpi, int qindex);
-void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
+void av1_initialize_rd_consts(struct AV1_COMP *cpi);
-void vp10_initialize_me_consts(struct VP10_COMP *cpi, MACROBLOCK *x,
+void av1_initialize_me_consts(struct AV1_COMP *cpi, MACROBLOCK *x,
int qindex);
-void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
+void av1_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
unsigned int qstep, int *rate,
int64_t *dist);
-int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
+int av1_get_switchable_rate(const struct AV1_COMP *cpi,
const MACROBLOCKD *const xd);
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
int stride);
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
int raster_block, int16_t *base);
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const struct AV1_COMP *cpi,
int ref_frame);
-void vp10_init_me_luts(void);
+void av1_init_me_luts(void);
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
const struct macroblockd_plane *pd,
ENTROPY_CONTEXT t_above[16],
ENTROPY_CONTEXT t_left[16]);
-void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds(struct AV1_COMP *cpi);
-void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds_sub8x8(struct AV1_COMP *cpi);
-void vp10_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
+void av1_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
int bsize, int best_mode_index);
static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
@@ -171,7 +171,7 @@
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
-void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+void av1_mv_pred(struct AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
@@ -179,17 +179,17 @@
x->errorperbit += (x->errorperbit == 0);
}
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
+void av1_setup_pred_block(const MACROBLOCKD *xd,
struct buf_2d dst[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src, int mi_row,
int mi_col, const struct scale_factors *scale,
const struct scale_factors *scale_uv);
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
aom_bit_depth_t bit_depth);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RD_H_
+#endif // AV1_ENCODER_RD_H_
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index e00717a..170cdf2 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -80,7 +80,7 @@
};
#define LAST_NEW_MV_INDEX 6
-static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
+static const MODE_DEFINITION av1_mode_order[MAX_MODES] = {
{ NEARESTMV, { LAST_FRAME, NONE } },
{ NEARESTMV, { ALTREF_FRAME, NONE } },
{ NEARESTMV, { GOLDEN_FRAME, NONE } },
@@ -122,7 +122,7 @@
{ D45_PRED, { INTRA_FRAME, NONE } },
};
-static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
+static const REF_DEFINITION av1_ref_order[MAX_REFS] = {
{ { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } },
{ { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } },
{ { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
@@ -153,7 +153,7 @@
}
}
-static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+static void model_rd_for_sb(AV1_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
MACROBLOCKD *xd, int *out_rate_sum,
int64_t *out_dist_sum, int *skip_txfm_sb,
int64_t *skip_sse_sb) {
@@ -173,9 +173,9 @@
int rate;
int64_t dist;
const int dequant_shift =
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
(xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
3;
x->pred_sse[ref] = 0;
@@ -250,7 +250,7 @@
rate_sum += rate;
dist_sum += dist;
} else {
- vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
+ av1_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
pd->dequant[1] >> dequant_shift, &rate,
&dist);
rate_sum += rate;
@@ -264,7 +264,7 @@
*out_dist_sum = dist_sum << 4;
}
-int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
+int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz) {
int i;
int64_t error = 0, sqcoeff = 0;
@@ -279,7 +279,7 @@
return error;
}
-int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
+int64_t av1_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
int block_size) {
int i;
int64_t error = 0;
@@ -292,8 +292,8 @@
return error;
}
-#if CONFIG_VPX_HIGHBITDEPTH
-int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
const tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz, int bd) {
int i;
@@ -313,7 +313,7 @@
*ssz = sqcoeff;
return error;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
* decide whether to include cost of a trailing EOB node or not (i.e. we
@@ -342,10 +342,10 @@
uint8_t token_cache[32 * 32];
int pt = combine_entropy_contexts(*A, *L);
int c, cost;
-#if CONFIG_VPX_HIGHBITDEPTH
- const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+ const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
#else
- const int *cat6_high_cost = vp10_get_high_cost_table(8);
+ const int *cat6_high_cost = av1_get_high_cost_table(8);
#endif
// Check for consistency of tx_size with mode info
@@ -363,11 +363,11 @@
int v = qcoeff[0];
int16_t prev_t;
EXTRABIT e;
- vp10_get_token_extra(v, &prev_t, &e);
+ av1_get_token_extra(v, &prev_t, &e);
cost = (*token_costs)[0][pt][prev_t] +
- vp10_get_cost(prev_t, e, cat6_high_cost);
+ av1_get_cost(prev_t, e, cat6_high_cost);
- token_cache[0] = vp10_pt_energy_class[prev_t];
+ token_cache[0] = av1_pt_energy_class[prev_t];
++token_costs;
// ac tokens
@@ -376,15 +376,15 @@
int16_t t;
v = qcoeff[rc];
- vp10_get_token_extra(v, &t, &e);
+ av1_get_token_extra(v, &t, &e);
if (use_fast_coef_costing) {
cost += (*token_costs)[!prev_t][!prev_t][t] +
- vp10_get_cost(t, e, cat6_high_cost);
+ av1_get_cost(t, e, cat6_high_cost);
} else {
pt = get_coef_context(nb, token_cache, c);
cost += (*token_costs)[!prev_t][pt][t] +
- vp10_get_cost(t, e, cat6_high_cost);
- token_cache[rc] = vp10_pt_energy_class[t];
+ av1_get_cost(t, e, cat6_high_cost);
+ token_cache[rc] = av1_pt_energy_class[t];
}
prev_t = t;
if (!--band_left) {
@@ -420,15 +420,15 @@
int shift = tx_size == TX_32X32 ? 0 : 2;
tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
- *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
+ *out_dist = av1_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
&this_sse, bd) >>
shift;
#else
*out_dist =
- vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ av1_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift;
+#endif // CONFIG_AOM_HIGHBITDEPTH
*out_sse = this_sse >> shift;
}
@@ -454,21 +454,21 @@
if (!is_inter_block(mbmi)) {
struct encode_b_args arg = { x, NULL, &mbmi->skip };
- vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
+ av1_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
tx_size, &arg);
dist_block(x, plane, block, tx_size, &dist, &sse);
} else if (max_txsize_lookup[plane_bsize] == tx_size) {
if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
SKIP_TXFM_NONE) {
// full forward transform and quantization
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
dist_block(x, plane, block, tx_size, &dist, &sse);
} else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
SKIP_TXFM_AC_ONLY) {
// compute DC coefficient
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
- vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
tx_size);
sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
dist = sse;
@@ -476,7 +476,7 @@
const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
const int64_t resd_sse = coeff[0] - dqcoeff[0];
int64_t dc_correct = orig_sse - resd_sse * resd_sse;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dc_correct >>= ((xd->bd - 8) * 2);
#endif
if (tx_size != TX_32X32) dc_correct >>= 2;
@@ -492,7 +492,7 @@
}
} else {
// full forward transform and quantization
- vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
+ av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
dist_block(x, plane, block, tx_size, &dist, &sse);
}
@@ -534,7 +534,7 @@
const struct macroblockd_plane *const pd = &xd->plane[plane];
TX_TYPE tx_type;
struct rdcost_block_args args;
- vp10_zero(args);
+ av1_zero(args);
args.x = x;
args.best_rd = ref_best_rd;
args.use_fast_coef_costing = use_fast_coef_casting;
@@ -542,12 +542,12 @@
if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+ av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
tx_type = get_tx_type(pd->plane_type, xd, 0);
args.so = get_scan(tx_size, tx_type);
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
&args);
if (args.exit_early) {
*rate = INT_MAX;
@@ -562,11 +562,11 @@
}
}
-static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_largest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip, int64_t *sse,
int64_t ref_best_rd, BLOCK_SIZE bs) {
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
@@ -574,9 +574,9 @@
TX_TYPE tx_type, best_tx_type = DCT_DCT;
int r, s;
int64_t d, psse, this_rd, best_rd = INT64_MAX;
- aom_prob skip_prob = vp10_get_skip_prob(cm, xd);
- int s0 = vp10_cost_bit(skip_prob, 0);
- int s1 = vp10_cost_bit(skip_prob, 1);
+ aom_prob skip_prob = av1_get_skip_prob(cm, xd);
+ int s0 = av1_cost_bit(skip_prob, 0);
+ int s1 = av1_cost_bit(skip_prob, 1);
const int is_inter = is_inter_block(mbmi);
mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
@@ -619,7 +619,7 @@
}
}
-static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_smallest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip,
int64_t *sse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
@@ -632,15 +632,15 @@
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_tx_size_from_rd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip,
int64_t *psse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- aom_prob skip_prob = vp10_get_skip_prob(cm, xd);
+ aom_prob skip_prob = av1_get_skip_prob(cm, xd);
int r, s;
int64_t d, sse;
int64_t rd = INT64_MAX;
@@ -655,8 +655,8 @@
const aom_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
assert(skip_prob > 0);
- s0 = vp10_cost_bit(skip_prob, 0);
- s1 = vp10_cost_bit(skip_prob, 1);
+ s0 = av1_cost_bit(skip_prob, 0);
+ s1 = av1_cost_bit(skip_prob, 1);
if (tx_select) {
start_tx = max_tx_size;
@@ -679,9 +679,9 @@
int r_tx_size = 0;
for (m = 0; m <= n - (n == (int)max_tx_size); ++m) {
if (m == n)
- r_tx_size += vp10_cost_zero(tx_probs[m]);
+ r_tx_size += av1_cost_zero(tx_probs[m]);
else
- r_tx_size += vp10_cost_one(tx_probs[m]);
+ r_tx_size += av1_cost_one(tx_probs[m]);
}
if (n >= TX_32X32 && tx_type != DCT_DCT) {
@@ -744,7 +744,7 @@
cpi->sf.use_fast_coef_costing);
}
-static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void super_block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip, int64_t *psse,
BLOCK_SIZE bs, int64_t ref_best_rd) {
MACROBLOCKD *xd = &x->e_mbd;
@@ -783,7 +783,7 @@
return 0;
}
-static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+static int64_t rd_pick_intra4x4block(AV1_COMP *cpi, MACROBLOCK *x, int row,
int col, PREDICTION_MODE *best_mode,
const int *bmode_costs, ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l, int *bestrate,
@@ -804,7 +804,7 @@
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
uint8_t best_dst[8 * 8];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t best_dst16[8 * 8];
#endif
@@ -812,7 +812,7 @@
memcpy(tl, l, sizeof(tl));
xd->mi[0]->mbmi.tx_size = TX_4X4;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
@@ -837,42 +837,42 @@
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
int16_t *const src_diff =
- vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+ av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0]->bmi[block].as_mode = mode;
- vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+ av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
dst_stride, col + idx, row + idy, 0);
aom_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
dst_stride, xd->bd);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
const scan_order *so = get_scan(TX_4X4, tx_type);
- vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
- vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+ av1_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
+ av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
- vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
dst_stride, p->eobs[block], xd->bd,
DCT_DCT, 1);
} else {
int64_t unused;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
const scan_order *so = get_scan(TX_4X4, tx_type);
- vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
- vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+ av1_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
+ av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
distortion +=
- vp10_highbd_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
+ av1_highbd_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
16, &unused, xd->bd) >>
2;
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
- vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
dst_stride, p->eobs[block], xd->bd,
tx_type, 0);
}
@@ -907,7 +907,7 @@
return best_rd;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
@@ -932,41 +932,41 @@
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
int16_t *const src_diff =
- vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+ av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0]->bmi[block].as_mode = mode;
- vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+ av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
dst_stride, col + idx, row + idy, 0);
aom_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
const scan_order *so = get_scan(TX_4X4, tx_type);
- vp10_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
- vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+ av1_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
+ av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
- vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
dst_stride, p->eobs[block], DCT_DCT, 1);
} else {
int64_t unused;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
const scan_order *so = get_scan(TX_4X4, tx_type);
- vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
- vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+ av1_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
+ av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
distortion +=
- vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
+ av1_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
&unused) >>
2;
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
- vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
dst_stride, p->eobs[block], tx_type, 0);
}
}
@@ -999,7 +999,7 @@
return best_rd;
}
-static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
+static int64_t rd_pick_intra_sub_8x8_y_mode(AV1_COMP *cpi, MACROBLOCK *mb,
int *rate, int *rate_y,
int64_t *distortion,
int64_t best_rd) {
@@ -1030,8 +1030,8 @@
int64_t d = INT64_MAX, this_rd = INT64_MAX;
i = idy * 2 + idx;
if (cpi->common.frame_type == KEY_FRAME) {
- const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
- const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
+ const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, i);
+ const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, i);
bmode_costs = cpi->y_mode_costs[A][L];
}
@@ -1065,7 +1065,7 @@
}
// This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_sby_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
int64_t best_rd) {
@@ -1080,8 +1080,8 @@
int *bmode_costs;
const MODE_INFO *above_mi = xd->above_mi;
const MODE_INFO *left_mi = xd->left_mi;
- const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
- const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
+ const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, 0);
+ const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, 0);
bmode_costs = cpi->y_mode_costs[A][L];
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
@@ -1119,7 +1119,7 @@
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int super_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skippable, int64_t *sse,
BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1135,7 +1135,7 @@
if (is_inter_block(mbmi) && is_cost_valid) {
int plane;
for (plane = 1; plane < MAX_MB_PLANE; ++plane)
- vp10_subtract_plane(x, bsize, plane);
+ av1_subtract_plane(x, bsize, plane);
}
*rate = 0;
@@ -1167,7 +1167,7 @@
return is_cost_valid;
}
-static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t rd_pick_intra_sbuv_mode(AV1_COMP *cpi, MACROBLOCK *x,
PICK_MODE_CONTEXT *ctx, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize,
@@ -1207,7 +1207,7 @@
return best_rd;
}
-static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_sbuv_dcpred(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
int *rate_tokenonly, int64_t *distortion,
int *skippable, BLOCK_SIZE bsize) {
int64_t unused;
@@ -1221,7 +1221,7 @@
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
+static void choose_intra_uv_mode(AV1_COMP *cpi, MACROBLOCK *const x,
PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
TX_SIZE max_tx_size, int *rate_uv,
int *rate_uv_tokenonly, int64_t *dist_uv,
@@ -1241,13 +1241,13 @@
*mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
}
-static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
+static int cost_mv_ref(const AV1_COMP *cpi, PREDICTION_MODE mode,
int mode_context) {
assert(is_inter_mode(mode));
return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
}
-static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
+static int set_and_cost_bmi_mvs(AV1_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
int i, PREDICTION_MODE mode, int_mv this_mv[2],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
int_mv seg_mvs[MAX_REF_FRAMES],
@@ -1265,12 +1265,12 @@
switch (mode) {
case NEWMV:
this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
- thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+ thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
if (is_compound) {
this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
thismvcost +=
- vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
+ av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
mvcost, MV_COST_WEIGHT_SUB);
}
break;
@@ -1300,7 +1300,7 @@
thismvcost;
}
-static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t encode_inter_mb_segment(AV1_COMP *cpi, MACROBLOCK *x,
int64_t best_yrd, int i, int *labelyrate,
int64_t *distortion, int64_t *sse,
ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
@@ -1317,43 +1317,43 @@
void (*fwd_txm4x4)(const int16_t *input, tran_low_t *output, int stride);
const uint8_t *const src =
- &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
uint8_t *const dst =
- &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+ &pd->dst.buf[av1_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
int64_t thisdistortion = 0, thissse = 0;
int thisrate = 0;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i);
const scan_order *so = get_scan(TX_4X4, tx_type);
- vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
+ av1_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_highbd_fwht4x4
+ fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? av1_highbd_fwht4x4
: aom_highbd_fdct4x4;
} else {
- fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_fwht4x4 : aom_fdct4x4;
+ fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? av1_fwht4x4 : aom_fdct4x4;
}
#else
- fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_fwht4x4 : aom_fdct4x4;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? av1_fwht4x4 : aom_fdct4x4;
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
aom_highbd_subtract_block(
height, width,
- vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
+ av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
p->src.stride, dst, pd->dst.stride, xd->bd);
} else {
- aom_subtract_block(height, width, vp10_raster_block_offset_int16(
+ aom_subtract_block(height, width, av1_raster_block_offset_int16(
BLOCK_8X8, i, p->src_diff),
8, src, p->src.stride, dst, pd->dst.stride);
}
#else
aom_subtract_block(height, width,
- vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
8, src, p->src.stride, dst, pd->dst.stride);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
k = i;
for (idy = 0; idy < height / 4; ++idy) {
@@ -1363,21 +1363,21 @@
k += (idy * 2 + idx);
coeff = BLOCK_OFFSET(p->coeff, k);
- fwd_txm4x4(vp10_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
+ fwd_txm4x4(av1_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
coeff, 8);
- vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
-#if CONFIG_VPX_HIGHBITDEPTH
+ av1_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- thisdistortion += vp10_highbd_block_error(
+ thisdistortion += av1_highbd_block_error(
coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, xd->bd);
} else {
thisdistortion +=
- vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
+ av1_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
}
#else
thisdistortion +=
- vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ av1_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
+#endif // CONFIG_AOM_HIGHBITDEPTH
thissse += ssz;
thisrate +=
cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, so->scan,
@@ -1433,15 +1433,15 @@
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
p->src.buf =
- &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
pd->pre[0].buf =
&pd->pre[0]
- .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
+ .buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
if (has_second_ref(mbmi))
pd->pre[1].buf =
&pd->pre[1]
- .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
+ .buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
}
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -1458,7 +1458,7 @@
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion.
// TODO(aconverse): Find out if this is still productive then clean up or remove
-static int check_best_zero_mv(const VP10_COMP *cpi,
+static int check_best_zero_mv(const AV1_COMP *cpi,
const uint8_t mode_context[MAX_REF_FRAMES],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
int this_mode,
@@ -1494,11 +1494,11 @@
return 1;
}
-static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+static void joint_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
int_mv *frame_mv, int mi_row, int mi_col,
int_mv single_newmv[MAX_REF_FRAMES],
int *rate_mv) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
MACROBLOCKD *xd = &x->e_mbd;
@@ -1507,24 +1507,24 @@
mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
int_mv ref_mv[2];
int ite, ref;
- const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter];
+ const InterpKernel *kernel = av1_filter_kernels[mbmi->interp_filter];
struct scale_factors sf;
// Do joint motion search in compound mode to get more accurate mv.
struct buf_2d backup_yv12[2][MAX_MB_PLANE];
int last_besterr[2] = { INT_MAX, INT_MAX };
const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
- vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
- vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
+ av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
+ av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
};
// Prediction buffer from second frame.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
uint8_t *second_pred;
#else
DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (ref = 0; ref < 2; ++ref) {
ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
@@ -1536,7 +1536,7 @@
// motion search code to be used without additional modifications.
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[ref][i] = xd->plane[i].pre[ref];
- vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
+ av1_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
NULL);
}
@@ -1545,13 +1545,13 @@
// Since we have scaled the reference frames to match the size of the current
// frame we must use a unit scaling factor during mode selection.
-#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
cm->height, cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
cm->height);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Allow joint search multiple times iteratively for each reference frame
// and break out of the search loop if it couldn't find a better mv.
@@ -1575,30 +1575,30 @@
ref_yv12[1] = xd->plane[0].pre[1];
// Get the prediction block from the 'other' reference frame.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
- vp10_highbd_build_inter_predictor(
+ av1_highbd_build_inter_predictor(
ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
&frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
} else {
second_pred = (uint8_t *)second_pred_alloc_16;
- vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
second_pred, pw, &frame_mv[refs[!id]].as_mv,
&sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
mi_col * MI_SIZE, mi_row * MI_SIZE);
}
#else
- vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
pw, ph, 0, kernel, MV_PRECISION_Q3,
mi_col * MI_SIZE, mi_row * MI_SIZE);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Do compound motion search on the current reference frame.
if (id) xd->plane[0].pre[0] = ref_yv12[id];
- vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
+ av1_set_mv_search_range(x, &ref_mv[id].as_mv);
// Use the mv result from the single mode as mv predictor.
tmp_mv = frame_mv[refs[id]].as_mv;
@@ -1607,11 +1607,11 @@
tmp_mv.row >>= 3;
// Small-range full-pixel motion search.
- bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
+ bestsme = av1_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
&cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
second_pred);
if (bestsme < INT_MAX)
- bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
+ bestsme = av1_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
x->mv_col_min = tmp_col_min;
@@ -1650,14 +1650,14 @@
xd->plane[i].pre[ref] = backup_yv12[ref][i];
}
- *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+ *rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
}
static int64_t rd_pick_best_sub8x8_mode(
- VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+ AV1_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
@@ -1671,7 +1671,7 @@
int k, br = 0, idx, idy;
int64_t bd = 0, block_sse = 0;
PREDICTION_MODE this_mode;
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &xd->plane[0];
const int label_count = 4;
@@ -1687,7 +1687,7 @@
const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
- vp10_zero(*bsi);
+ av1_zero(*bsi);
bsi->segment_rd = best_rd;
bsi->ref_mv[0] = best_ref_mv;
@@ -1721,7 +1721,7 @@
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
frame_mv[ZEROMV][frame].as_int = 0;
- vp10_append_sub8x8_mvs_for_idx(
+ av1_append_sub8x8_mvs_for_idx(
cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
&frame_mv[NEARMV][frame], mbmi_ext->mode_context);
}
@@ -1778,7 +1778,7 @@
// max mv magnitude and the best ref mvs of the current block for
// the given reference.
step_param =
- (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
+ (av1_init_search_range(max_mv) + cpi->mv_step_param) / 2;
} else {
step_param = cpi->mv_step_param;
}
@@ -1795,9 +1795,9 @@
// adjust src pointer for this block
mi_buf_shift(x, i);
- vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
+ av1_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
- bestsme = vp10_full_pixel_search(
+ bestsme = av1_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, sadpb,
cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
&bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
@@ -1985,14 +1985,14 @@
*returntotrate = bsi->r;
*returndistortion = bsi->d;
*returnyrate = bsi->segment_yrate;
- *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
+ *skippable = av1_is_skippable_in_plane(x, BLOCK_8X8, 0);
*psse = bsi->sse;
mbmi->mode = bsi->modes[3];
return bsi->segment_rd;
}
-static void estimate_ref_frame_costs(const VP10_COMMON *cm,
+static void estimate_ref_frame_costs(const AV1_COMMON *cm,
const MACROBLOCKD *xd, int segment_id,
unsigned int *ref_costs_single,
unsigned int *ref_costs_comp,
@@ -2004,47 +2004,47 @@
memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
*comp_mode_p = 128;
} else {
- aom_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
+ aom_prob intra_inter_p = av1_get_intra_inter_prob(cm, xd);
aom_prob comp_inter_p = 128;
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
+ comp_inter_p = av1_get_reference_mode_prob(cm, xd);
*comp_mode_p = comp_inter_p;
} else {
*comp_mode_p = 128;
}
- ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
+ ref_costs_single[INTRA_FRAME] = av1_cost_bit(intra_inter_p, 0);
if (cm->reference_mode != COMPOUND_REFERENCE) {
- aom_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
- aom_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
- unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+ aom_prob ref_single_p1 = av1_get_pred_prob_single_ref_p1(cm, xd);
+ aom_prob ref_single_p2 = av1_get_pred_prob_single_ref_p2(cm, xd);
+ unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- base_cost += vp10_cost_bit(comp_inter_p, 0);
+ base_cost += av1_cost_bit(comp_inter_p, 0);
ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
ref_costs_single[ALTREF_FRAME] = base_cost;
- ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
- ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
- ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
- ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+ ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p1, 0);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p1, 1);
+ ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
+ ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p2, 0);
+ ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p2, 1);
} else {
ref_costs_single[LAST_FRAME] = 512;
ref_costs_single[GOLDEN_FRAME] = 512;
ref_costs_single[ALTREF_FRAME] = 512;
}
if (cm->reference_mode != SINGLE_REFERENCE) {
- aom_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
- unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+ aom_prob ref_comp_p = av1_get_pred_prob_comp_ref_p(cm, xd);
+ unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- base_cost += vp10_cost_bit(comp_inter_p, 1);
+ base_cost += av1_cost_bit(comp_inter_p, 1);
- ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
- ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1);
+ ref_costs_comp[LAST_FRAME] = base_cost + av1_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[GOLDEN_FRAME] = base_cost + av1_cost_bit(ref_comp_p, 1);
} else {
ref_costs_comp[LAST_FRAME] = 512;
ref_costs_comp[GOLDEN_FRAME] = 512;
@@ -2073,13 +2073,13 @@
sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
}
-static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
+static void setup_buffer_inter(AV1_COMP *cpi, MACROBLOCK *x,
MV_REFERENCE_FRAME ref_frame,
BLOCK_SIZE block_size, int mi_row, int mi_col,
int_mv frame_nearest_mv[MAX_REF_FRAMES],
int_mv frame_near_mv[MAX_REF_FRAMES],
struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mi = xd->mi[0];
@@ -2091,30 +2091,30 @@
// TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
// use the UV scaling factors.
- vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
+ av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
// Gets an initial list of candidate vectors from neighbours and orders them
- vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, NULL,
+ av1_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, NULL,
NULL, mbmi_ext->mode_context);
// Candidate refinement carried out at encoder and decoder
- vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
+ av1_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
&frame_nearest_mv[ref_frame],
&frame_near_mv[ref_frame]);
// Further refinement that is encode side only to test the top few candidates
// in full and choose the best as the centre point for subsequent searches.
// The current implementation doesn't support scaling.
- if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
- vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+ if (!av1_is_scaled(sf) && block_size >= BLOCK_8X8)
+ av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
block_size);
}
-static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void single_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int_mv *tmp_mv, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
- const VP10_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
int bestsme = INT_MAX;
@@ -2131,7 +2131,7 @@
int cost_list[5];
const YV12_BUFFER_CONFIG *scaled_ref_frame =
- vp10_get_scaled_ref_frame(cpi, ref);
+ av1_get_scaled_ref_frame(cpi, ref);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2145,10 +2145,10 @@
// motion search code to be used without additional modifications.
for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
- vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+ av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
- vp10_set_mv_search_range(x, &ref_mv);
+ av1_set_mv_search_range(x, &ref_mv);
// Work out the size of the first step in the mv step search.
// 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
@@ -2157,7 +2157,7 @@
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
step_param =
- (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+ (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
2;
} else {
step_param = cpi->mv_step_param;
@@ -2202,7 +2202,7 @@
mvp_full.col >>= 3;
mvp_full.row >>= 3;
- bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
+ bestsme = av1_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
cond_cost_list(cpi, cost_list), &ref_mv,
&tmp_mv->as_mv, INT_MAX, 1);
@@ -2219,7 +2219,7 @@
cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
}
- *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+ *rate_mv = av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
x->mvcost, MV_COST_WEIGHT);
if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
@@ -2247,7 +2247,7 @@
// However, once established that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
-static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
+static int discount_newmv_test(const AV1_COMP *cpi, int this_mode,
int_mv this_mv,
int_mv (*mode_mv)[MAX_REF_FRAMES],
int ref_frame) {
@@ -2272,14 +2272,14 @@
}
static int64_t handle_inter_mode(
- VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+ AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
@@ -2290,12 +2290,12 @@
int refs[2] = { mbmi->ref_frame[0],
(mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv cur_mv[2];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
uint8_t *tmp_buf;
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
int pred_exists = 0;
int intpel_mv;
int64_t rd, tmp_rd, best_rd = INT64_MAX;
@@ -2319,13 +2319,13 @@
int64_t skip_sse_sb = INT64_MAX;
int64_t distortion_y = 0, distortion_uv = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
} else {
tmp_buf = (uint8_t *)tmp_buf16;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (pred_filter_search) {
INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
@@ -2358,10 +2358,10 @@
joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
single_newmv, &rate_mv);
} else {
- rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+ rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
&x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
- rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+ rate_mv += av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
&x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
@@ -2450,7 +2450,7 @@
int64_t tmp_skip_sse = INT64_MAX;
mbmi->interp_filter = i;
- rs = vp10_get_switchable_rate(cpi, xd);
+ rs = av1_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
if (i > 0 && intpel_mv) {
@@ -2481,7 +2481,7 @@
xd->plane[j].dst.stride = 64;
}
}
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
&tmp_skip_sse);
@@ -2531,7 +2531,7 @@
// Set the appropriate filter
mbmi->interp_filter =
cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
- rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
+ rs = cm->interp_filter == SWITCHABLE ? av1_get_switchable_rate(cpi, xd) : 0;
if (pred_exists) {
if (best_needs_copy) {
@@ -2548,7 +2548,7 @@
// Handles the special case when a filter that is not in the
// switchable list (ex. bilinear) is indicated at the frame level, or
// skip condition holds.
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
&skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
@@ -2584,7 +2584,7 @@
int64_t rdcosty = INT64_MAX;
// Y cost and distortion
- vp10_subtract_plane(x, bsize, 0);
+ av1_subtract_plane(x, bsize, 0);
super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
ref_best_rd);
@@ -2618,7 +2618,7 @@
*disable_skip = 1;
// The cost of skip bit needs to be added.
- *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
*distortion = skip_sse_sb;
}
@@ -2629,10 +2629,10 @@
return 0; // The rate-distortion cost will be re-calculated by caller.
}
-void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+void av1_rd_pick_intra_mode_sb(AV1_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblockd_plane *const pd = xd->plane;
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
@@ -2664,11 +2664,11 @@
if (y_skip && uv_skip) {
rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
- vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
rd_cost->dist = dist_y + dist_uv;
} else {
rd_cost->rate =
- rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate_y + rate_uv + av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
rd_cost->dist = dist_y + dist_uv;
}
@@ -2682,7 +2682,7 @@
#define LOW_VAR_THRESH 16
#define VLOW_ADJ_MAX 25
#define VHIGH_ADJ_MAX 8
-static void rd_variance_adjustment(VP10_COMP *cpi, MACROBLOCK *x,
+static void rd_variance_adjustment(AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int64_t *this_rd,
MV_REFERENCE_FRAME ref_frame,
unsigned int source_variance) {
@@ -2694,18 +2694,18 @@
if (*this_rd == INT64_MAX) return;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- recon_variance = vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
+ recon_variance = av1_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
bsize, xd->bd);
} else {
recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif // CONFIG_AOM_HIGHBITDEPTH
if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
absvar_diff = (source_variance > recon_variance)
@@ -2734,7 +2734,7 @@
}
// Do we have an internal image edge (e.g. formatting bars).
-int vp10_internal_image_edge(VP10_COMP *cpi) {
+int av1_internal_image_edge(AV1_COMP *cpi) {
return (cpi->oxcf.pass == 2) &&
((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
(cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
@@ -2743,7 +2743,7 @@
// Checks to see if a super block is on a horizontal image edge.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
+int av1_active_h_edge(AV1_COMP *cpi, int mi_row, int mi_step) {
int top_edge = 0;
int bottom_edge = cpi->common.mi_rows;
int is_active_h_edge = 0;
@@ -2770,7 +2770,7 @@
// Checks to see if a super block is on a vertical image edge.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
+int av1_active_v_edge(AV1_COMP *cpi, int mi_col, int mi_step) {
int left_edge = 0;
int right_edge = cpi->common.mi_cols;
int is_active_v_edge = 0;
@@ -2797,17 +2797,17 @@
// Checks to see if a super block is at the edge of the active image.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
- return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
- vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
+int av1_active_edge_sb(AV1_COMP *cpi, int mi_row, int mi_col) {
+ return av1_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
+ av1_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
}
-void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
MACROBLOCK *x, int mi_row, int mi_col,
RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2842,7 +2842,7 @@
int64_t dist_uv[TX_SIZES];
int skip_uv[TX_SIZES];
PREDICTION_MODE mode_uv[TX_SIZES];
- const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+ const int intra_cost_penalty = av1_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
int best_skip2 = 0;
uint8_t ref_frame_skip_mask[2] = { 0 };
@@ -2856,7 +2856,7 @@
int64_t mask_filter = 0;
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
- vp10_zero(best_mbmode);
+ av1_zero(best_mbmode);
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
@@ -2991,9 +2991,9 @@
int64_t total_sse = INT64_MAX;
int early_term = 0;
- this_mode = vp10_mode_order[mode_index].mode;
- ref_frame = vp10_mode_order[mode_index].ref_frame[0];
- second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
+ this_mode = av1_mode_order[mode_index].mode;
+ ref_frame = av1_mode_order[mode_index].ref_frame[0];
+ second_ref_frame = av1_mode_order[mode_index].ref_frame[1];
// Look at the reference frame of the best mode so far and set the
// skip mask to look at a subset of the remaining modes.
@@ -3130,7 +3130,7 @@
&mask_filter, filter_cache);
if (this_rd == INT64_MAX) continue;
- compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+ compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
}
@@ -3149,15 +3149,15 @@
rate2 -= (rate_y + rate_uv);
// Cost the skip mb case
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
} else if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) {
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
} else {
// FIXME(rbultje) make this work for splitmv also
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
distortion2 = total_sse;
assert(total_sse >= 0);
rate2 -= (rate_y + rate_uv);
@@ -3165,7 +3165,7 @@
}
} else {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
// Calculate the final RD estimate for this mode.
@@ -3226,11 +3226,11 @@
int qstep = xd->plane[0].dequant[1];
// TODO(debargha): Enhance this by specializing for each mode_index
int scale = 4;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
qstep >>= (xd->bd - 8);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (x->source_variance < UINT_MAX) {
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
@@ -3348,7 +3348,7 @@
!is_inter_block(&best_mbmode));
if (!cpi->rc.is_src_frame_alt_ref)
- vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
+ av1_update_rd_thresh_fact(tile_data->thresh_freq_fact,
sf->adaptive_rd_thresh, bsize, best_mode_index);
// macroblock modes
@@ -3372,7 +3372,7 @@
if (cm->interp_filter == SWITCHABLE)
assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
} else {
- vp10_zero(best_filter_diff);
+ av1_zero(best_filter_diff);
}
// TODO(yunqingwang): Moving this line in front of the above best_filter_diff
@@ -3385,12 +3385,12 @@
int max_plane = is_inter_block(&xd->mi[0]->mbmi) ? MAX_MB_PLANE : 1;
for (plane = 0; plane < max_plane; ++plane) {
x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
- has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
+ has_high_freq_coeff |= av1_has_high_freq_in_plane(x, bsize, plane);
}
for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
- has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
+ has_high_freq_coeff |= av1_has_high_freq_in_plane(x, bsize, plane);
}
best_mode_skippable |= !has_high_freq_coeff;
@@ -3402,12 +3402,12 @@
best_filter_diff, best_mode_skippable);
}
-void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sb_seg_skip(AV1_COMP *cpi, TileDataEnc *tile_data,
MACROBLOCK *x, RD_COST *rd_cost,
BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
unsigned char segment_id = mbmi->segment_id;
@@ -3447,7 +3447,7 @@
int best_rs = INT_MAX;
for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
mbmi->interp_filter = i;
- rs = vp10_get_switchable_rate(cpi, xd);
+ rs = av1_get_switchable_rate(cpi, xd);
if (rs < best_rs) {
best_rs = rs;
best_filter = mbmi->interp_filter;
@@ -3458,13 +3458,13 @@
// Set the appropriate filter
if (cm->interp_filter == SWITCHABLE) {
mbmi->interp_filter = best_filter;
- rate2 += vp10_get_switchable_rate(cpi, xd);
+ rate2 += av1_get_switchable_rate(cpi, xd);
} else {
mbmi->interp_filter = cm->interp_filter;
}
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- rate2 += vp10_cost_bit(comp_mode_p, comp_pred);
+ rate2 += av1_cost_bit(comp_mode_p, comp_pred);
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
@@ -3484,22 +3484,22 @@
assert((cm->interp_filter == SWITCHABLE) ||
(cm->interp_filter == mbmi->interp_filter));
- vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
+ av1_update_rd_thresh_fact(tile_data->thresh_freq_fact,
cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
- vp10_zero(best_pred_diff);
- vp10_zero(best_filter_diff);
+ av1_zero(best_pred_diff);
+ av1_zero(best_filter_diff);
if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
}
-void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sub8x8(AV1_COMP *cpi, TileDataEnc *tile_data,
MACROBLOCK *x, int mi_row, int mi_col,
RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3527,7 +3527,7 @@
int64_t dist_uv;
int skip_uv;
PREDICTION_MODE mode_uv = DC_PRED;
- const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+ const int intra_cost_penalty = av1_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
int_mv seg_mvs[4][MAX_REF_FRAMES];
b_mode_info best_bmodes[4];
@@ -3536,10 +3536,10 @@
int64_t mask_filter = 0;
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
int internal_active_edge =
- vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+ av1_active_edge_sb(cpi, mi_row, mi_col) && av1_internal_image_edge(cpi);
memset(x->zcoeff_blk[TX_4X4], 0, 4);
- vp10_zero(best_mbmode);
+ av1_zero(best_mbmode);
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
@@ -3583,8 +3583,8 @@
int64_t total_sse = INT_MAX;
int early_term = 0;
- ref_frame = vp10_ref_order[ref_index].ref_frame[0];
- second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];
+ ref_frame = av1_ref_order[ref_index].ref_frame[0];
+ second_ref_frame = av1_ref_order[ref_index].ref_frame[1];
// Look at the reference frame of the best mode so far and set the
// skip mask to look at a subset of the remaining modes.
@@ -3636,11 +3636,11 @@
// TODO(jingning, jkoleszar): scaling reference frame not supported for
// sub8x8 blocks.
if (ref_frame > INTRA_FRAME &&
- vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+ av1_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
continue;
if (second_ref_frame > INTRA_FRAME &&
- vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
+ av1_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
continue;
if (comp_pred)
@@ -3752,7 +3752,7 @@
mi_row, mi_col);
if (tmp_rd == INT64_MAX) continue;
- rs = vp10_get_switchable_rate(cpi, xd);
+ rs = av1_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
filter_cache[switchable_filter_index] = tmp_rd;
filter_cache[SWITCHABLE_FILTERS] =
@@ -3823,13 +3823,13 @@
distortion2 += distortion;
if (cm->interp_filter == SWITCHABLE)
- rate2 += vp10_get_switchable_rate(cpi, xd);
+ rate2 += av1_get_switchable_rate(cpi, xd);
if (!mode_excluded)
mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
: cm->reference_mode == COMPOUND_REFERENCE;
- compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+ compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
tmp_best_rdu =
best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
@@ -3838,7 +3838,7 @@
if (tmp_best_rdu > 0) {
// If even the 'Y' rd value of split is higher than best so far
// then dont bother looking at UV
- vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
+ av1_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
&uv_sse, BLOCK_8X8, tmp_best_rdu))
@@ -3869,10 +3869,10 @@
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
} else {
// FIXME(rbultje) make this work for splitmv also
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
distortion2 = total_sse;
assert(total_sse >= 0);
rate2 -= (rate_y + rate_uv);
@@ -3882,7 +3882,7 @@
}
} else {
// Add in the cost of the no skip flag.
- rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
}
// Calculate the final RD estimate for this mode.
@@ -3930,11 +3930,11 @@
int qstep = xd->plane[0].dequant[1];
// TODO(debargha): Enhance this by specializing for each mode_index
int scale = 4;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
qstep >>= (xd->bd - 8);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (x->source_variance < UINT_MAX) {
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
@@ -4026,7 +4026,7 @@
(cm->interp_filter == best_mbmode.interp_filter) ||
!is_inter_block(&best_mbmode));
- vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
+ av1_update_rd_thresh_fact(tile_data->thresh_freq_fact,
sf->adaptive_rd_thresh, bsize, best_ref_index);
// macroblock modes
@@ -4059,7 +4059,7 @@
if (cm->interp_filter == SWITCHABLE)
assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
} else {
- vp10_zero(best_filter_diff);
+ av1_zero(best_filter_diff);
}
store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
index e07aeba..6097b42 100644
--- a/av1/encoder/rdopt.h
+++ b/av1/encoder/rdopt.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_RDOPT_H_
-#define VP10_ENCODER_RDOPT_H_
+#ifndef AV1_ENCODER_RDOPT_H_
+#define AV1_ENCODER_RDOPT_H_
#include "av1/common/blockd.h"
@@ -22,40 +22,40 @@
#endif
struct TileInfo;
-struct VP10_COMP;
+struct AV1_COMP;
struct macroblock;
struct RD_COST;
-void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
+void av1_rd_pick_intra_mode_sb(struct AV1_COMP *cpi, struct macroblock *x,
struct RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd);
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs);
-#if CONFIG_VPX_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs, int bd);
#endif
-void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
+void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
struct TileDataEnc *tile_data,
struct macroblock *x, int mi_row, int mi_col,
struct RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
-void vp10_rd_pick_inter_mode_sb_seg_skip(
- struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+void av1_rd_pick_inter_mode_sb_seg_skip(
+ struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far);
-int vp10_internal_image_edge(struct VP10_COMP *cpi);
-int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
-int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step);
-int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
+int av1_internal_image_edge(struct AV1_COMP *cpi);
+int av1_active_h_edge(struct AV1_COMP *cpi, int mi_row, int mi_step);
+int av1_active_v_edge(struct AV1_COMP *cpi, int mi_col, int mi_step);
+int av1_active_edge_sb(struct AV1_COMP *cpi, int mi_row, int mi_col);
-void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
+void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi,
struct TileDataEnc *tile_data,
struct macroblock *x, int mi_row,
int mi_col, struct RD_COST *rd_cost,
@@ -66,4 +66,4 @@
} // extern "C"
#endif
-#endif // VP10_ENCODER_RDOPT_H_
+#endif // AV1_ENCODER_RDOPT_H_
diff --git a/av1/encoder/resize.c b/av1/encoder/resize.c
index 18731d6..c5c5699 100644
--- a/av1/encoder/resize.c
+++ b/av1/encoder/resize.c
@@ -16,9 +16,9 @@
#include <stdlib.h>
#include <string.h>
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#include "aom_dsp/aom_dsp_common.h"
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#include "aom_ports/mem.h"
#include "av1/common/common.h"
#include "av1/encoder/resize.h"
@@ -133,8 +133,8 @@
};
// Filters for factor of 2 downsampling.
-static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
-static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
+static const int16_t av1_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t av1_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
int outlength16 = outlength * 16;
@@ -240,8 +240,8 @@
static void down2_symeven(const uint8_t *const input, int length,
uint8_t *output) {
// Actual filter len = 2 * filter_len_half.
- const int16_t *filter = vp10_down2_symeven_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+ const int16_t *filter = av1_down2_symeven_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
int i, j;
uint8_t *optr = output;
int l1 = filter_len_half;
@@ -296,8 +296,8 @@
static void down2_symodd(const uint8_t *const input, int length,
uint8_t *output) {
// Actual filter len = 2 * filter_len_half - 1.
- const int16_t *filter = vp10_down2_symodd_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+ const int16_t *filter = av1_down2_symodd_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
int i, j;
uint8_t *optr = output;
int l1 = filter_len_half - 1;
@@ -426,7 +426,7 @@
}
}
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
+void av1_resize_plane(const uint8_t *const input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride) {
int i;
@@ -451,7 +451,7 @@
free(arrbuf);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_interpolate(const uint16_t *const input, int inlength,
uint16_t *output, int outlength, int bd) {
const int64_t delta =
@@ -542,8 +542,8 @@
static void highbd_down2_symeven(const uint16_t *const input, int length,
uint16_t *output, int bd) {
// Actual filter len = 2 * filter_len_half.
- static const int16_t *filter = vp10_down2_symeven_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+ static const int16_t *filter = av1_down2_symeven_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
int i, j;
uint16_t *optr = output;
int l1 = filter_len_half;
@@ -598,8 +598,8 @@
static void highbd_down2_symodd(const uint16_t *const input, int length,
uint16_t *output, int bd) {
// Actual filter len = 2 * filter_len_half - 1.
- static const int16_t *filter = vp10_down2_symodd_half_filter;
- const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+ static const int16_t *filter = av1_down2_symodd_half_filter;
+ const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
int i, j;
uint16_t *optr = output;
int l1 = filter_len_half - 1;
@@ -715,7 +715,7 @@
}
}
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
int in_stride, uint8_t *output, int height2,
int width2, int out_stride, int bd) {
int i;
@@ -738,84 +738,84 @@
free(tmpbuf);
free(arrbuf);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width, uint8_t *oy,
int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ av1_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
owidth / 2, ouv_stride);
- vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ av1_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
owidth / 2, ouv_stride);
}
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width, uint8_t *oy,
int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+ av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ av1_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
ouv_stride);
- vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+ av1_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
ouv_stride);
}
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width, uint8_t *oy,
int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ av1_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
ouv_stride);
- vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ av1_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
ouv_stride);
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride, bd);
- vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ av1_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
owidth / 2, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ av1_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
owidth / 2, ouv_stride, bd);
}
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride, bd);
- vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+ av1_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
owidth / 2, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+ av1_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
owidth / 2, ouv_stride, bd);
}
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
oy_stride, bd);
- vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ av1_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
ouv_stride, bd);
- vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ av1_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
ouv_stride, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/resize.h b/av1/encoder/resize.h
index 030f4e4..a7b02c7 100644
--- a/av1/encoder/resize.h
+++ b/av1/encoder/resize.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_RESIZE_H_
-#define VP10_ENCODER_RESIZE_H_
+#ifndef AV1_ENCODER_RESIZE_H_
+#define AV1_ENCODER_RESIZE_H_
#include <stdio.h>
#include "aom/aom_integer.h"
@@ -19,51 +19,51 @@
extern "C" {
#endif
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
+void av1_resize_plane(const uint8_t *const input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride);
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width, uint8_t *oy,
int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width, uint8_t *oy,
int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width, uint8_t *oy,
int oy_stride, uint8_t *ou, uint8_t *ov,
int ouv_stride, int oheight, int owidth);
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
int in_stride, uint8_t *output, int height2,
int width2, int out_stride, int bd);
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
int uv_stride, int height, int width,
uint8_t *oy, int oy_stride, uint8_t *ou,
uint8_t *ov, int ouv_stride, int oheight,
int owidth, int bd);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RESIZE_H_
+#endif // AV1_ENCODER_RESIZE_H_
diff --git a/av1/encoder/segmentation.c b/av1/encoder/segmentation.c
index 0c0dcce..5388b94 100644
--- a/av1/encoder/segmentation.c
+++ b/av1/encoder/segmentation.c
@@ -20,30 +20,30 @@
#include "av1/encoder/segmentation.h"
#include "av1/encoder/subexp.h"
-void vp10_enable_segmentation(struct segmentation *seg) {
+void av1_enable_segmentation(struct segmentation *seg) {
seg->enabled = 1;
seg->update_map = 1;
seg->update_data = 1;
}
-void vp10_disable_segmentation(struct segmentation *seg) {
+void av1_disable_segmentation(struct segmentation *seg) {
seg->enabled = 0;
seg->update_map = 0;
seg->update_data = 0;
}
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
unsigned char abs_delta) {
seg->abs_delta = abs_delta;
memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
}
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
seg->feature_mask[segment_id] &= ~(1 << feature_id);
}
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
seg->feature_data[segment_id][feature_id] = 0;
}
@@ -74,7 +74,7 @@
for (i = 0; i < 7; i++) {
const unsigned *ct =
i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
- vp10_prob_diff_update_savings_search(
+ av1_prob_diff_update_savings_search(
ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
}
#else
@@ -92,35 +92,35 @@
const int c4567 = c45 + c67;
// Cost the top node of the tree
- int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
+ int cost = c0123 * av1_cost_zero(probs[0]) + c4567 * av1_cost_one(probs[0]);
// Cost subsequent levels
if (c0123 > 0) {
- cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
+ cost += c01 * av1_cost_zero(probs[1]) + c23 * av1_cost_one(probs[1]);
if (c01 > 0)
- cost += segcounts[0] * vp10_cost_zero(probs[3]) +
- segcounts[1] * vp10_cost_one(probs[3]);
+ cost += segcounts[0] * av1_cost_zero(probs[3]) +
+ segcounts[1] * av1_cost_one(probs[3]);
if (c23 > 0)
- cost += segcounts[2] * vp10_cost_zero(probs[4]) +
- segcounts[3] * vp10_cost_one(probs[4]);
+ cost += segcounts[2] * av1_cost_zero(probs[4]) +
+ segcounts[3] * av1_cost_one(probs[4]);
}
if (c4567 > 0) {
- cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
+ cost += c45 * av1_cost_zero(probs[2]) + c67 * av1_cost_one(probs[2]);
if (c45 > 0)
- cost += segcounts[4] * vp10_cost_zero(probs[5]) +
- segcounts[5] * vp10_cost_one(probs[5]);
+ cost += segcounts[4] * av1_cost_zero(probs[5]) +
+ segcounts[5] * av1_cost_one(probs[5]);
if (c67 > 0)
- cost += segcounts[6] * vp10_cost_zero(probs[6]) +
- segcounts[7] * vp10_cost_one(probs[6]);
+ cost += segcounts[6] * av1_cost_zero(probs[6]) +
+ segcounts[7] * av1_cost_one(probs[6]);
}
return cost;
}
-static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs(const AV1_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *tile, MODE_INFO **mi,
unsigned *no_pred_segcounts,
unsigned (*temporal_predictor_count)[2],
@@ -145,7 +145,7 @@
const int pred_segment_id =
get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
const int pred_flag = pred_segment_id == segment_id;
- const int pred_context = vp10_get_pred_context_seg_id(xd);
+ const int pred_context = av1_get_pred_context_seg_id(xd);
// Store the prediction status for this mb and update counts
// as appropriate
@@ -157,7 +157,7 @@
}
}
-static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *tile, MODE_INFO **mi,
unsigned *no_pred_segcounts,
unsigned (*temporal_predictor_count)[2],
@@ -204,7 +204,7 @@
}
}
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd) {
struct segmentation *seg = &cm->seg;
#if CONFIG_MISC_FIXES
struct segmentation_probs *segp = &cm->fc->seg;
@@ -245,7 +245,7 @@
for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
TileInfo tile;
MODE_INFO **mi_ptr;
- vp10_tile_init(&tile, cm, 0, tile_col);
+ av1_tile_init(&tile, cm, 0, tile_col);
mi_ptr = cm->mi_grid_visible + tile.mi_col_start;
for (mi_row = 0; mi_row < cm->mi_rows;
@@ -277,7 +277,7 @@
const int count1 = temporal_predictor_count[i][1];
#if CONFIG_MISC_FIXES
- vp10_prob_diff_update_savings_search(temporal_predictor_count[i],
+ av1_prob_diff_update_savings_search(temporal_predictor_count[i],
segp->pred_probs[i],
&t_nopred_prob[i], DIFF_UPDATE_PROB);
#else
@@ -285,8 +285,8 @@
#endif
// Add in the predictor signaling cost
- t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) +
- count1 * vp10_cost_one(t_nopred_prob[i]);
+ t_pred_cost += count0 * av1_cost_zero(t_nopred_prob[i]) +
+ count1 * av1_cost_one(t_nopred_prob[i]);
}
}
@@ -306,7 +306,7 @@
}
}
-void vp10_reset_segment_features(VP10_COMMON *cm) {
+void av1_reset_segment_features(AV1_COMMON *cm) {
struct segmentation *seg = &cm->seg;
#if !CONFIG_MISC_FIXES
struct segmentation_probs *segp = &cm->segp;
@@ -319,5 +319,5 @@
#if !CONFIG_MISC_FIXES
memset(segp->tree_probs, 255, sizeof(segp->tree_probs));
#endif
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
}
diff --git a/av1/encoder/segmentation.h b/av1/encoder/segmentation.h
index 620e571..418dac6 100644
--- a/av1/encoder/segmentation.h
+++ b/av1/encoder/segmentation.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_SEGMENTATION_H_
-#define VP10_ENCODER_SEGMENTATION_H_
+#ifndef AV1_ENCODER_SEGMENTATION_H_
+#define AV1_ENCODER_SEGMENTATION_H_
#include "av1/common/blockd.h"
#include "av1/encoder/encoder.h"
@@ -19,12 +19,12 @@
extern "C" {
#endif
-void vp10_enable_segmentation(struct segmentation *seg);
-void vp10_disable_segmentation(struct segmentation *seg);
+void av1_enable_segmentation(struct segmentation *seg);
+void av1_disable_segmentation(struct segmentation *seg);
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id);
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id);
// The values given for each segment can be either deltas (from the default
@@ -37,15 +37,15 @@
//
// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
// the absolute values given).
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
unsigned char abs_delta);
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd);
-void vp10_reset_segment_features(VP10_COMMON *cm);
+void av1_reset_segment_features(AV1_COMMON *cm);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SEGMENTATION_H_
+#endif // AV1_ENCODER_SEGMENTATION_H_
diff --git a/av1/encoder/skin_detection.c b/av1/encoder/skin_detection.c
index b24bdae..11ff16b 100644
--- a/av1/encoder/skin_detection.c
+++ b/av1/encoder/skin_detection.c
@@ -41,7 +41,7 @@
return skin_diff;
}
-int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
+int av1_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
if (y < y_low || y > y_high)
return 0;
else
@@ -50,9 +50,9 @@
#ifdef OUTPUT_YUV_SKINMAP
// For viewing skin map on input source.
-void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file) {
+void av1_compute_skin_map(AV1_COMP *const cpi, FILE *yuv_skinmap_file) {
int i, j, mi_row, mi_col;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
uint8_t *y;
const uint8_t *src_y = cpi->Source->y_buffer;
const uint8_t *src_u = cpi->Source->u_buffer;
@@ -79,7 +79,7 @@
const uint8_t ysource = src_y[4 * src_ystride + 4];
const uint8_t usource = src_u[2 * src_uvstride + 2];
const uint8_t vsource = src_v[2 * src_uvstride + 2];
- const int is_skin = vp10_skin_pixel(ysource, usource, vsource);
+ const int is_skin = av1_skin_pixel(ysource, usource, vsource);
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++) {
if (is_skin)
@@ -98,7 +98,7 @@
src_u += (src_uvstride << 2) - ((cm->mi_cols - 1) << 2);
src_v += (src_uvstride << 2) - ((cm->mi_cols - 1) << 2);
}
- vp10_write_yuv_frame_420(&skinmap, yuv_skinmap_file);
+ av1_write_yuv_frame_420(&skinmap, yuv_skinmap_file);
aom_free_frame_buffer(&skinmap);
}
#endif
diff --git a/av1/encoder/skin_detection.h b/av1/encoder/skin_detection.h
index 49c25c1..4071303 100644
--- a/av1/encoder/skin_detection.h
+++ b/av1/encoder/skin_detection.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_SKIN_MAP_H_
-#define VP10_ENCODER_SKIN_MAP_H_
+#ifndef AV1_ENCODER_SKIN_MAP_H_
+#define AV1_ENCODER_SKIN_MAP_H_
#include "av1/common/blockd.h"
@@ -18,19 +18,19 @@
extern "C" {
#endif
-struct VP10_COMP;
+struct AV1_COMP;
// #define OUTPUT_YUV_SKINMAP
-int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr);
+int av1_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr);
#ifdef OUTPUT_YUV_SKINMAP
// For viewing skin map on input source.
-void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file);
+void av1_compute_skin_map(AV1_COMP *const cpi, FILE *yuv_skinmap_file);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SKIN_MAP_H_
+#endif // AV1_ENCODER_SKIN_MAP_H_
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index 72e2a95..5ef444d 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -38,7 +38,7 @@
// Intra only frames, golden frames (except alt ref overlays) and
// alt ref frames tend to be coded at a higher than ambient quality
-static int frame_is_boosted(const VP10_COMP *cpi) {
+static int frame_is_boosted(const AV1_COMP *cpi) {
return frame_is_kf_gf_arf(cpi);
}
@@ -48,7 +48,7 @@
// partly on the screen area that over which they propogate. Propogation is
// limited by transform block size but the screen area take up by a given block
// size will be larger for a small image format stretched to full screen.
-static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) {
+static BLOCK_SIZE set_partition_min_limit(AV1_COMMON *const cm) {
unsigned int screen_area = (cm->width * cm->height);
// Select block size based on image format size.
@@ -64,10 +64,10 @@
}
}
-static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_good_speed_feature_framesize_dependent(AV1_COMP *cpi,
SPEED_FEATURES *sf,
int speed) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
if (speed >= 1) {
if (VPXMIN(cm->width, cm->height) >= 720) {
@@ -115,7 +115,7 @@
// Also if the image edge is internal to the coded area.
if ((speed >= 1) && (cpi->oxcf.pass == 2) &&
((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
- (vp10_internal_image_edge(cpi)))) {
+ (av1_internal_image_edge(cpi)))) {
sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
}
@@ -129,7 +129,7 @@
}
}
-static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
+static void set_good_speed_feature(AV1_COMP *cpi, AV1_COMMON *cm,
SPEED_FEATURES *sf, int speed) {
const int boosted = frame_is_boosted(cpi);
@@ -138,7 +138,7 @@
if (speed >= 1) {
if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
- vp10_internal_image_edge(cpi)) {
+ av1_internal_image_edge(cpi)) {
sf->use_square_partition_only = !frame_is_boosted(cpi);
} else {
sf->use_square_partition_only = !frame_is_intra_only(cm);
@@ -227,10 +227,10 @@
}
}
-static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_rt_speed_feature_framesize_dependent(AV1_COMP *cpi,
SPEED_FEATURES *sf,
int speed) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
if (speed >= 1) {
if (VPXMIN(cm->width, cm->height) >= 720) {
@@ -264,9 +264,9 @@
}
}
-static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
+static void set_rt_speed_feature(AV1_COMP *cpi, SPEED_FEATURES *sf, int speed,
aom_tune_content content) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int is_keyframe = cm->frame_type == KEY_FRAME;
const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
sf->static_segmentation = 0;
@@ -411,9 +411,9 @@
}
}
-void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_dependent(AV1_COMP *cpi) {
SPEED_FEATURES *const sf = &cpi->sf;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
RD_OPT *const rd = &cpi->rd;
int i;
@@ -440,11 +440,11 @@
}
}
-void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_independent(AV1_COMP *cpi) {
SPEED_FEATURES *const sf = &cpi->sf;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
int i;
// best quality defaults
@@ -520,8 +520,8 @@
else if (oxcf->mode == GOOD)
set_good_speed_feature(cpi, cm, sf, oxcf->speed);
- cpi->full_search_sad = vp10_full_search_sad;
- cpi->diamond_search_sad = vp10_diamond_search_sad;
+ cpi->full_search_sad = av1_full_search_sad;
+ cpi->diamond_search_sad = av1_diamond_search_sad;
sf->allow_exhaustive_searches = 1;
if (oxcf->mode == BEST) {
@@ -562,14 +562,14 @@
}
if (sf->mv.subpel_search_method == SUBPEL_TREE) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
+ cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_more;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
cpi->find_fractional_mv_step =
- vp10_find_best_sub_pixel_tree_pruned_evenmore;
+ av1_find_best_sub_pixel_tree_pruned_evenmore;
}
#if !CONFIG_AOM_QM
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index ff02127..349537a 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_SPEED_FEATURES_H_
-#define VP10_ENCODER_SPEED_FEATURES_H_
+#ifndef AV1_ENCODER_SPEED_FEATURES_H_
+#define AV1_ENCODER_SPEED_FEATURES_H_
#include "av1/common/enums.h"
@@ -415,17 +415,17 @@
// Allow skipping partition search for still image frame
int allow_partition_search_skip;
- // Fast approximation of vp10_model_rd_from_var_lapndz
+ // Fast approximation of av1_model_rd_from_var_lapndz
int simple_model_rd_from_var;
} SPEED_FEATURES;
-struct VP10_COMP;
+struct AV1_COMP;
-void vp10_set_speed_features_framesize_independent(struct VP10_COMP *cpi);
-void vp10_set_speed_features_framesize_dependent(struct VP10_COMP *cpi);
+void av1_set_speed_features_framesize_independent(struct AV1_COMP *cpi);
+void av1_set_speed_features_framesize_dependent(struct AV1_COMP *cpi);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SPEED_FEATURES_H_
+#endif // AV1_ENCODER_SPEED_FEATURES_H_
diff --git a/av1/encoder/subexp.c b/av1/encoder/subexp.c
index d1930a2..a89ebca 100644
--- a/av1/encoder/subexp.c
+++ b/av1/encoder/subexp.c
@@ -15,7 +15,7 @@
#include "av1/encoder/cost.h"
#include "av1/encoder/subexp.h"
-#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+#define av1_cost_upd256 ((int)(av1_cost_one(upd) - av1_cost_zero(upd)))
static const uint8_t update_bits[255] = {
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -116,12 +116,12 @@
}
}
-void vp10_write_prob_diff_update(aom_writer *w, aom_prob newp, aom_prob oldp) {
+void av1_write_prob_diff_update(aom_writer *w, aom_prob newp, aom_prob oldp) {
const int delp = remap_prob(newp, oldp);
encode_term_subexp(w, delp);
}
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
aom_prob *bestp, aom_prob upd) {
const int old_b = cost_branch256(ct, oldp);
int bestsavings = 0;
@@ -130,7 +130,7 @@
for (newp = *bestp; newp != oldp; newp += step) {
const int new_b = cost_branch256(ct, newp);
- const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+ const int update_b = prob_diff_update_cost(newp, oldp) + av1_cost_upd256;
const int savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -141,14 +141,14 @@
return bestsavings;
}
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
const aom_prob *oldp,
aom_prob *bestp, aom_prob upd,
int stepsize) {
int i, old_b, new_b, update_b, savings, bestsavings, step;
int newp;
aom_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
- vp10_model_to_full_probs(oldp, oldplist);
+ av1_model_to_full_probs(oldp, oldplist);
memcpy(newplist, oldp, sizeof(aom_prob) * UNCONSTRAINED_NODES);
for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
old_b += cost_branch256(ct + 2 * i, oldplist[i]);
@@ -162,12 +162,12 @@
for (newp = *bestp; newp > oldp[PIVOT_NODE]; newp += step) {
if (newp < 1 || newp > 255) continue;
newplist[PIVOT_NODE] = newp;
- vp10_model_to_full_probs(newplist, newplist);
+ av1_model_to_full_probs(newplist, newplist);
for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
new_b += cost_branch256(ct + 2 * i, newplist[i]);
new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
update_b =
- prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+ prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -179,12 +179,12 @@
for (newp = *bestp; newp < oldp[PIVOT_NODE]; newp += step) {
if (newp < 1 || newp > 255) continue;
newplist[PIVOT_NODE] = newp;
- vp10_model_to_full_probs(newplist, newplist);
+ av1_model_to_full_probs(newplist, newplist);
for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
new_b += cost_branch256(ct + 2 * i, newplist[i]);
new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
update_b =
- prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+ prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -197,27 +197,27 @@
return bestsavings;
}
-void vp10_cond_prob_diff_update(aom_writer *w, aom_prob *oldp,
+void av1_cond_prob_diff_update(aom_writer *w, aom_prob *oldp,
const unsigned int ct[2]) {
const aom_prob upd = DIFF_UPDATE_PROB;
aom_prob newp = get_binary_prob(ct[0], ct[1]);
const int savings =
- vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+ av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
assert(newp >= 1);
if (savings > 0) {
aom_write(w, 1, upd);
- vp10_write_prob_diff_update(w, newp, *oldp);
+ av1_write_prob_diff_update(w, newp, *oldp);
*oldp = newp;
} else {
aom_write(w, 0, upd);
}
}
-int vp10_cond_prob_diff_update_savings(aom_prob *oldp,
+int av1_cond_prob_diff_update_savings(aom_prob *oldp,
const unsigned int ct[2]) {
const aom_prob upd = DIFF_UPDATE_PROB;
aom_prob newp = get_binary_prob(ct[0], ct[1]);
const int savings =
- vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+ av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
return savings;
}
diff --git a/av1/encoder/subexp.h b/av1/encoder/subexp.h
index 9d870f5..d8142ba 100644
--- a/av1/encoder/subexp.h
+++ b/av1/encoder/subexp.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_SUBEXP_H_
-#define VP10_ENCODER_SUBEXP_H_
+#ifndef AV1_ENCODER_SUBEXP_H_
+#define AV1_ENCODER_SUBEXP_H_
#ifdef __cplusplus
extern "C" {
@@ -20,24 +20,24 @@
struct aom_writer;
-void vp10_write_prob_diff_update(struct aom_writer *w, aom_prob newp,
+void av1_write_prob_diff_update(struct aom_writer *w, aom_prob newp,
aom_prob oldp);
-void vp10_cond_prob_diff_update(struct aom_writer *w, aom_prob *oldp,
+void av1_cond_prob_diff_update(struct aom_writer *w, aom_prob *oldp,
const unsigned int ct[2]);
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
aom_prob *bestp, aom_prob upd);
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
const aom_prob *oldp,
aom_prob *bestp, aom_prob upd,
int stepsize);
-int vp10_cond_prob_diff_update_savings(aom_prob *oldp,
+int av1_cond_prob_diff_update_savings(aom_prob *oldp,
const unsigned int ct[2]);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_SUBEXP_H_
+#endif // AV1_ENCODER_SUBEXP_H_
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index cdad633..31dda5b 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -38,7 +38,7 @@
const int which_mv = 0;
const MV mv = { mv_row, mv_col };
const InterpKernel *const kernel =
- vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter];
+ av1_filter_kernels[xd->mi[0]->mbmi.interp_filter];
enum mv_precision mv_precision_uv;
int uv_stride;
@@ -50,37 +50,37 @@
mv_precision_uv = MV_PRECISION_Q3;
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
+ av1_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
scale, 16, 16, which_mv, kernel,
MV_PRECISION_Q3, x, y, xd->bd);
- vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+ av1_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
uv_block_width, &mv, scale,
uv_block_width, uv_block_height, which_mv,
kernel, mv_precision_uv, x, y, xd->bd);
- vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+ av1_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
uv_block_width, &mv, scale,
uv_block_width, uv_block_height, which_mv,
kernel, mv_precision_uv, x, y, xd->bd);
return;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
- vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ av1_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
which_mv, kernel, MV_PRECISION_Q3, x, y);
- vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+ av1_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
&mv, scale, uv_block_width, uv_block_height,
which_mv, kernel, mv_precision_uv, x, y);
- vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+ av1_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
&mv, scale, uv_block_width, uv_block_height,
which_mv, kernel, mv_precision_uv, x, y);
}
-void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+void av1_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
uint8_t *frame2, unsigned int block_width,
unsigned int block_height, int strength,
int filter_weight, unsigned int *accumulator,
@@ -119,8 +119,8 @@
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_temporal_filter_apply_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_temporal_filter_apply_c(
uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
unsigned int block_width, unsigned int block_height, int strength,
int filter_weight, unsigned int *accumulator, uint16_t *count) {
@@ -159,9 +159,9 @@
byte += stride - block_width;
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
+static int temporal_filter_find_matching_mb_c(AV1_COMP *cpi,
uint8_t *arf_frame_buf,
uint8_t *frame_ptr_buf,
int stride) {
@@ -196,7 +196,7 @@
step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
// Ignore mv costing by sending NULL pointer instead of cost arrays
- vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
+ av1_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
&best_ref_mv1, ref_mv);
@@ -214,7 +214,7 @@
return bestsme;
}
-static void temporal_filter_iterate_c(VP10_COMP *cpi,
+static void temporal_filter_iterate_c(AV1_COMP *cpi,
YV12_BUFFER_CONFIG **frames,
int frame_count, int alt_ref_index,
int strength,
@@ -232,7 +232,7 @@
MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
uint8_t *dst1, *dst2;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
uint8_t *predictor;
@@ -245,7 +245,7 @@
// Save input state
uint8_t *input_buffer[MAX_MB_PLANE];
int i;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
predictor = CONVERT_TO_BYTEPTR(predictor16);
} else {
@@ -315,53 +315,53 @@
mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
mb_col * 16, mb_row * 16);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int adj_strength = strength + 2 * (mbd->bd - 8);
// Apply the filter (YUV)
- vp10_highbd_temporal_filter_apply(
+ av1_highbd_temporal_filter_apply(
f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
adj_strength, filter_weight, accumulator, count);
- vp10_highbd_temporal_filter_apply(
+ av1_highbd_temporal_filter_apply(
f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
mb_uv_width, mb_uv_height, adj_strength, filter_weight,
accumulator + 256, count + 256);
- vp10_highbd_temporal_filter_apply(
+ av1_highbd_temporal_filter_apply(
f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
mb_uv_width, mb_uv_height, adj_strength, filter_weight,
accumulator + 512, count + 512);
} else {
// Apply the filter (YUV)
- vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+ av1_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
predictor, 16, 16, strength,
filter_weight, accumulator, count);
- vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+ av1_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
predictor + 256, mb_uv_width,
mb_uv_height, strength, filter_weight,
accumulator + 256, count + 256);
- vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+ av1_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
predictor + 512, mb_uv_width,
mb_uv_height, strength, filter_weight,
accumulator + 512, count + 512);
}
#else
// Apply the filter (YUV)
- vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+ av1_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
predictor, 16, 16, strength, filter_weight,
accumulator, count);
- vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+ av1_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
predictor + 256, mb_uv_width, mb_uv_height,
strength, filter_weight, accumulator + 256,
count + 256);
- vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+ av1_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
predictor + 512, mb_uv_width, mb_uv_height,
strength, filter_weight, accumulator + 512,
count + 512);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *dst1_16;
uint16_t *dst2_16;
@@ -481,7 +481,7 @@
}
byte += stride - mb_uv_width;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
mb_y_offset += 16;
mb_uv_offset += mb_uv_width;
}
@@ -494,11 +494,11 @@
}
// Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
+static void adjust_arnr_filter(AV1_COMP *cpi, int distance, int group_boost,
int *arnr_frames, int *arnr_strength) {
- const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+ const AV1EncoderConfig *const oxcf = &cpi->oxcf;
const int frames_after_arf =
- vp10_lookahead_depth(cpi->lookahead) - distance - 1;
+ av1_lookahead_depth(cpi->lookahead) - distance - 1;
int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
int frames_bwd;
int q, frames, strength;
@@ -518,10 +518,10 @@
// Adjust the strength based on active max q.
if (cpi->common.current_video_frame > 1)
- q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+ q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
cpi->common.bit_depth));
else
- q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+ q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
cpi->common.bit_depth));
if (q > 16) {
strength = oxcf->arnr_strength;
@@ -552,7 +552,7 @@
*arnr_strength = strength;
}
-void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
+void av1_temporal_filter(AV1_COMP *cpi, int distance) {
RATE_CONTROL *const rc = &cpi->rc;
int frame;
int frames_to_blur;
@@ -573,7 +573,7 @@
for (frame = 0; frame < frames_to_blur; ++frame) {
const int which_buffer = start_frame - frame;
struct lookahead_entry *buf =
- vp10_lookahead_peek(cpi->lookahead, which_buffer);
+ av1_lookahead_peek(cpi->lookahead, which_buffer);
frames[frames_to_blur - 1 - frame] = &buf->img;
}
@@ -581,16 +581,16 @@
// Setup scaling factors. Scaling on each of the arnr frames is not
// supported.
// ARF is produced at the native frame size and resized when coded.
-#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
frames[0]->y_crop_width, frames[0]->y_crop_height,
cpi->common.use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(
+ av1_setup_scale_factors_for_frame(
&sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
frames[0]->y_crop_width, frames[0]->y_crop_height);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
temporal_filter_iterate_c(cpi, frames, frames_to_blur,
diff --git a/av1/encoder/temporal_filter.h b/av1/encoder/temporal_filter.h
index 6b68cb7..bc0863a 100644
--- a/av1/encoder/temporal_filter.h
+++ b/av1/encoder/temporal_filter.h
@@ -9,17 +9,17 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_TEMPORAL_FILTER_H_
-#define VP10_ENCODER_TEMPORAL_FILTER_H_
+#ifndef AV1_ENCODER_TEMPORAL_FILTER_H_
+#define AV1_ENCODER_TEMPORAL_FILTER_H_
#ifdef __cplusplus
extern "C" {
#endif
-void vp10_temporal_filter(VP10_COMP *cpi, int distance);
+void av1_temporal_filter(AV1_COMP *cpi, int distance);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_TEMPORAL_FILTER_H_
+#endif // AV1_ENCODER_TEMPORAL_FILTER_H_
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index 21fd17c..0c38681 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -46,13 +46,13 @@
{ 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
{ 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
};
-const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+const TOKENVALUE *av1_dct_cat_lt_10_value_tokens =
dct_cat_lt_10_value_tokens +
(sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
2;
// Array indices are identical to previously-existing CONTEXT_NODE indices
-const aom_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
-EOB_TOKEN,
2, // 0 = EOB
-ZERO_TOKEN,
@@ -100,7 +100,7 @@
2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773
};
-const int16_t vp10_cat6_low_cost[256] = {
+const int16_t av1_cat6_low_cost[256] = {
3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574,
3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822,
3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053,
@@ -122,7 +122,7 @@
6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831,
6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982
};
-const int vp10_cat6_high_cost[64] = {
+const int av1_cat6_high_cost[64] = {
88, 2251, 2727, 4890, 3148, 5311, 5787, 7950, 3666, 5829, 6305,
8468, 6726, 8889, 9365, 11528, 3666, 5829, 6305, 8468, 6726, 8889,
9365, 11528, 7244, 9407, 9883, 12046, 10304, 12467, 12943, 15106, 3666,
@@ -131,8 +131,8 @@
15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684
};
-#if CONFIG_VPX_HIGHBITDEPTH
-const int vp10_cat6_high10_high_cost[256] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const int av1_cat6_high10_high_cost[256] = {
94, 2257, 2733, 4896, 3154, 5317, 5793, 7956, 3672, 5835, 6311,
8474, 6732, 8895, 9371, 11534, 3672, 5835, 6311, 8474, 6732, 8895,
9371, 11534, 7250, 9413, 9889, 12052, 10310, 12473, 12949, 15112, 3672,
@@ -158,7 +158,7 @@
18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
24237, 24713, 26876
};
-const int vp10_cat6_high12_high_cost[1024] = {
+const int av1_cat6_high12_high_cost[1024] = {
100, 2263, 2739, 4902, 3160, 5323, 5799, 7962, 3678, 5841, 6317,
8480, 6738, 8901, 9377, 11540, 3678, 5841, 6317, 8480, 6738, 8901,
9377, 11540, 7256, 9419, 9895, 12058, 10316, 12479, 12955, 15118, 3678,
@@ -256,7 +256,7 @@
};
#endif
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static const aom_tree_index cat1_high10[2] = { 0, 0 };
static const aom_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
static const aom_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
@@ -277,59 +277,59 @@
};
#endif
-const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
+const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS] = {
{ 0, 0, 0, 0, zero_cost }, // ZERO_TOKEN
{ 0, 0, 0, 1, sign_cost }, // ONE_TOKEN
{ 0, 0, 0, 2, sign_cost }, // TWO_TOKEN
{ 0, 0, 0, 3, sign_cost }, // THREE_TOKEN
{ 0, 0, 0, 4, sign_cost }, // FOUR_TOKEN
- { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN
- { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN
- { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN
- { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN
- { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN
- { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN
+ { cat1, av1_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN
+ { cat2, av1_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN
+ { cat3, av1_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN
+ { cat4, av1_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN
+ { cat5, av1_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN
+ { cat6, av1_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN
{ 0, 0, 0, 0, zero_cost } // EOB_TOKEN
};
-#if CONFIG_VPX_HIGHBITDEPTH
-const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS] = {
{ 0, 0, 0, 0, zero_cost }, // ZERO
{ 0, 0, 0, 1, sign_cost }, // ONE
{ 0, 0, 0, 2, sign_cost }, // TWO
{ 0, 0, 0, 3, sign_cost }, // THREE
{ 0, 0, 0, 4, sign_cost }, // FOUR
- { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
- { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
- { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
- { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
- { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
- { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 }, // CAT6
+ { cat1_high10, av1_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { cat2_high10, av1_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { cat3_high10, av1_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { cat4_high10, av1_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { cat5_high10, av1_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { cat6_high10, av1_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 }, // CAT6
{ 0, 0, 0, 0, zero_cost } // EOB
};
-const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
+const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS] = {
{ 0, 0, 0, 0, zero_cost }, // ZERO
{ 0, 0, 0, 1, sign_cost }, // ONE
{ 0, 0, 0, 2, sign_cost }, // TWO
{ 0, 0, 0, 3, sign_cost }, // THREE
{ 0, 0, 0, 4, sign_cost }, // FOUR
- { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
- { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
- { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
- { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
- { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
- { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6
+ { cat1_high12, av1_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { cat2_high12, av1_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { cat3_high12, av1_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { cat4_high12, av1_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { cat5_high12, av1_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { cat6_high12, av1_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6
{ 0, 0, 0, 0, zero_cost } // EOB
};
#endif
-const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
+const struct av1_token av1_coef_encodings[ENTROPY_TOKENS] = {
{ 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 },
{ 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
};
struct tokenize_b_args {
- VP10_COMP *cpi;
+ AV1_COMP *cpi;
ThreadData *td;
TOKENEXTRA **tp;
};
@@ -343,7 +343,7 @@
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
- vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+ av1_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
blk_row);
}
@@ -378,7 +378,7 @@
static void tokenize_b(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct tokenize_b_args *const args = arg;
- VP10_COMP *cpi = args->cpi;
+ AV1_COMP *cpi = args->cpi;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -431,13 +431,13 @@
v = qcoeff[scan[c]];
}
- vp10_get_token_extra(v, &token, &extra);
+ av1_get_token_extra(v, &token, &extra);
add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token,
(uint8_t)skip_eob, counts[band[c]][pt]);
eob_branch[band[c]][pt] += !skip_eob;
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
++c;
pt = get_coef_context(nb, token_cache, c);
}
@@ -449,7 +449,7 @@
*tp = t;
- vp10_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
+ av1_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
}
struct is_skippable_args {
@@ -468,11 +468,11 @@
}
// TODO(yaowu): rewrite and optimize this function to remove the usage of
-// vp10_foreach_transform_block() and simplify is_skippable().
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+// av1_foreach_transform_block() and simplify is_skippable().
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 1;
struct is_skippable_args args = { x->plane[plane].eobs, &result };
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
+ av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
&args);
return result;
}
@@ -490,21 +490,21 @@
*(args->skippable) |= (args->eobs[block] > eobs);
}
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 0;
struct is_skippable_args args = { x->plane[plane].eobs, &result };
- vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
+ av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
has_high_freq_coeff, &args);
return result;
}
-void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
int dry_run, BLOCK_SIZE bsize) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- const int ctx = vp10_get_skip_context(xd);
+ const int ctx = av1_get_skip_context(xd);
const int skip_inc =
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
struct tokenize_b_args arg = { cpi, td, t };
@@ -520,12 +520,12 @@
td->counts->skip[ctx][0] += skip_inc;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+ av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
&arg);
(*t)->token = EOSB_TOKEN;
(*t)++;
}
} else {
- vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+ av1_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
}
}
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
index a7a37cb..3ab8193 100644
--- a/av1/encoder/tokenize.h
+++ b/av1/encoder/tokenize.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_TOKENIZE_H_
-#define VP10_ENCODER_TOKENIZE_H_
+#ifndef AV1_ENCODER_TOKENIZE_H_
+#define AV1_ENCODER_TOKENIZE_H_
#include "av1/common/entropy.h"
@@ -23,7 +23,7 @@
#define EOSB_TOKEN 127 // Not signalled, encoder only
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef int32_t EXTRABIT;
#else
typedef int16_t EXTRABIT;
@@ -41,52 +41,52 @@
uint8_t skip_eob_node;
} TOKENEXTRA;
-extern const aom_tree_index vp10_coef_tree[];
-extern const aom_tree_index vp10_coef_con_tree[];
-extern const struct vp10_token vp10_coef_encodings[];
+extern const aom_tree_index av1_coef_tree[];
+extern const aom_tree_index av1_coef_con_tree[];
+extern const struct av1_token av1_coef_encodings[];
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-struct VP10_COMP;
+struct AV1_COMP;
struct ThreadData;
-void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
+void av1_tokenize_sb(struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
-extern const int16_t *vp10_dct_value_cost_ptr;
+extern const int16_t *av1_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the
* fields are not.
*/
-extern const TOKENVALUE *vp10_dct_value_tokens_ptr;
-extern const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens;
-extern const int16_t vp10_cat6_low_cost[256];
-extern const int vp10_cat6_high_cost[64];
-extern const int vp10_cat6_high10_high_cost[256];
-extern const int vp10_cat6_high12_high_cost[1024];
-static INLINE int vp10_get_cost(int16_t token, EXTRABIT extrabits,
+extern const TOKENVALUE *av1_dct_value_tokens_ptr;
+extern const TOKENVALUE *av1_dct_cat_lt_10_value_tokens;
+extern const int16_t av1_cat6_low_cost[256];
+extern const int av1_cat6_high_cost[64];
+extern const int av1_cat6_high10_high_cost[256];
+extern const int av1_cat6_high12_high_cost[1024];
+static INLINE int av1_get_cost(int16_t token, EXTRABIT extrabits,
const int *cat6_high_table) {
if (token != CATEGORY6_TOKEN)
- return vp10_extra_bits[token].cost[extrabits >> 1];
- return vp10_cat6_low_cost[(extrabits >> 1) & 0xff] +
+ return av1_extra_bits[token].cost[extrabits >> 1];
+ return av1_cat6_low_cost[(extrabits >> 1) & 0xff] +
cat6_high_table[extrabits >> 9];
}
-#if CONFIG_VPX_HIGHBITDEPTH
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
- return bit_depth == 8 ? vp10_cat6_high_cost
- : (bit_depth == 10 ? vp10_cat6_high10_high_cost
- : vp10_cat6_high12_high_cost);
+#if CONFIG_AOM_HIGHBITDEPTH
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
+ return bit_depth == 8 ? av1_cat6_high_cost
+ : (bit_depth == 10 ? av1_cat6_high10_high_cost
+ : av1_cat6_high12_high_cost);
}
#else
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
(void)bit_depth;
- return vp10_cat6_high_cost;
+ return av1_cat6_high_cost;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-static INLINE void vp10_get_token_extra(int v, int16_t *token,
+static INLINE void av1_get_token_extra(int v, int16_t *token,
EXTRABIT *extra) {
if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
*token = CATEGORY6_TOKEN;
@@ -96,16 +96,16 @@
*extra = -2 * v - 2 * CAT6_MIN_VAL + 1;
return;
}
- *token = vp10_dct_cat_lt_10_value_tokens[v].token;
- *extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
+ *token = av1_dct_cat_lt_10_value_tokens[v].token;
+ *extra = av1_dct_cat_lt_10_value_tokens[v].extra;
}
-static INLINE int16_t vp10_get_token(int v) {
+static INLINE int16_t av1_get_token(int v) {
if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
- return vp10_dct_cat_lt_10_value_tokens[v].token;
+ return av1_dct_cat_lt_10_value_tokens[v].token;
}
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_TOKENIZE_H_
+#endif // AV1_ENCODER_TOKENIZE_H_
diff --git a/av1/encoder/treewriter.c b/av1/encoder/treewriter.c
index e1b9887..7c35b56 100644
--- a/av1/encoder/treewriter.c
+++ b/av1/encoder/treewriter.c
@@ -11,7 +11,7 @@
#include "av1/encoder/treewriter.h"
-static void tree2tok(struct vp10_token *tokens, const aom_tree_index *tree,
+static void tree2tok(struct av1_token *tokens, const aom_tree_index *tree,
int i, int v, int l) {
v += v;
++l;
@@ -27,7 +27,7 @@
} while (++v & 1);
}
-void vp10_tokens_from_tree(struct vp10_token *tokens,
+void av1_tokens_from_tree(struct av1_token *tokens,
const aom_tree_index *tree) {
tree2tok(tokens, tree, 0, 0, 0);
}
@@ -52,7 +52,7 @@
return left + right;
}
-void vp10_tree_probs_from_distribution(aom_tree tree,
+void av1_tree_probs_from_distribution(aom_tree tree,
unsigned int branch_ct[/* n-1 */][2],
const unsigned int num_events[/* n */]) {
convert_distribution(0, tree, branch_ct, num_events);
diff --git a/av1/encoder/treewriter.h b/av1/encoder/treewriter.h
index 306bd4a..38ab5a6 100644
--- a/av1/encoder/treewriter.h
+++ b/av1/encoder/treewriter.h
@@ -9,8 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#ifndef VP10_ENCODER_TREEWRITER_H_
-#define VP10_ENCODER_TREEWRITER_H_
+#ifndef AV1_ENCODER_TREEWRITER_H_
+#define AV1_ENCODER_TREEWRITER_H_
#include "aom_dsp/bitwriter.h"
@@ -18,18 +18,18 @@
extern "C" {
#endif
-void vp10_tree_probs_from_distribution(aom_tree tree,
+void av1_tree_probs_from_distribution(aom_tree tree,
unsigned int branch_ct[/* n - 1 */][2],
const unsigned int num_events[/* n */]);
-struct vp10_token {
+struct av1_token {
int value;
int len;
};
-void vp10_tokens_from_tree(struct vp10_token *, const aom_tree_index *);
+void av1_tokens_from_tree(struct av1_token *, const aom_tree_index *);
-static INLINE void vp10_write_tree(aom_writer *w, const aom_tree_index *tree,
+static INLINE void av1_write_tree(aom_writer *w, const aom_tree_index *tree,
const aom_prob *probs, int bits, int len,
aom_tree_index i) {
do {
@@ -39,14 +39,14 @@
} while (len);
}
-static INLINE void vp10_write_token(aom_writer *w, const aom_tree_index *tree,
+static INLINE void av1_write_token(aom_writer *w, const aom_tree_index *tree,
const aom_prob *probs,
- const struct vp10_token *token) {
- vp10_write_tree(w, tree, probs, token->value, token->len, 0);
+ const struct av1_token *token) {
+ av1_write_tree(w, tree, probs, token->value, token->len, 0);
}
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_TREEWRITER_H_
+#endif // AV1_ENCODER_TREEWRITER_H_
diff --git a/av1/encoder/x86/dct_mmx.asm b/av1/encoder/x86/dct_mmx.asm
index 34ce315..2154300 100644
--- a/av1/encoder/x86/dct_mmx.asm
+++ b/av1/encoder/x86/dct_mmx.asm
@@ -8,7 +8,7 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vp10
+%define private_prefix av1
%include "third_party/x86inc/x86inc.asm"
@@ -65,7 +65,7 @@
psllw m2, 2
psllw m3, 2
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
pxor m4, m4
pxor m5, m5
pcmpgtw m4, m0
diff --git a/av1/encoder/x86/dct_sse2.c b/av1/encoder/x86/dct_sse2.c
index 54a9d2d..693fed5 100644
--- a/av1/encoder/x86/dct_sse2.c
+++ b/av1/encoder/x86/dct_sse2.c
@@ -152,7 +152,7 @@
transpose_4x4(in);
}
-void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
__m128i in[4];
@@ -180,7 +180,7 @@
}
}
-void vp10_fdct8x8_quant_sse2(
+void av1_fdct8x8_quant_sse2(
const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
@@ -1129,7 +1129,7 @@
array_transpose_8x8(in, in);
}
-void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
__m128i in[8];
@@ -2012,7 +2012,7 @@
array_transpose_16x16(in0, in1);
}
-void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
int tx_type) {
__m128i in0[16], in1[16];
diff --git a/av1/encoder/x86/dct_ssse3.c b/av1/encoder/x86/dct_ssse3.c
index 91cbec2..0b5ae83 100644
--- a/av1/encoder/x86/dct_ssse3.c
+++ b/av1/encoder/x86/dct_ssse3.c
@@ -21,7 +21,7 @@
#include "aom_dsp/x86/inv_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
-void vp10_fdct8x8_quant_ssse3(
+void av1_fdct8x8_quant_ssse3(
const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
diff --git a/av1/encoder/x86/error_intrin_avx2.c b/av1/encoder/x86/error_intrin_avx2.c
index 5f60c3c..c1d8899 100644
--- a/av1/encoder/x86/error_intrin_avx2.c
+++ b/av1/encoder/x86/error_intrin_avx2.c
@@ -14,7 +14,7 @@
#include "./av1_rtcd.h"
#include "aom/aom_integer.h"
-int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+int64_t av1_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
intptr_t block_size, int64_t *ssz) {
__m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
__m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
diff --git a/av1/encoder/x86/error_sse2.asm b/av1/encoder/x86/error_sse2.asm
index 0772da4..44a52d7 100644
--- a/av1/encoder/x86/error_sse2.asm
+++ b/av1/encoder/x86/error_sse2.asm
@@ -8,13 +8,13 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vp10
+%define private_prefix av1
%include "third_party/x86inc/x86inc.asm"
SECTION .text
-; int64_t vp10_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+; int64_t av1_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
; int64_t *ssz)
INIT_XMM sse2
@@ -76,7 +76,7 @@
RET
; Compute the sum of squared difference between two int16_t vectors.
-; int64_t vp10_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
+; int64_t av1_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
; intptr_t block_size)
INIT_XMM sse2
diff --git a/av1/encoder/x86/highbd_block_error_intrin_sse2.c b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
index 3a4a47a..e105d54 100644
--- a/av1/encoder/x86/highbd_block_error_intrin_sse2.c
+++ b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -14,7 +14,7 @@
#include "av1/common/common.h"
-int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
+int64_t av1_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz,
int bps) {
int i, j, test;
diff --git a/av1/encoder/x86/quantize_sse2.c b/av1/encoder/x86/quantize_sse2.c
index 44b44d0..50eee35 100644
--- a/av1/encoder/x86/quantize_sse2.c
+++ b/av1/encoder/x86/quantize_sse2.c
@@ -15,7 +15,7 @@
#include "./av1_rtcd.h"
#include "aom/aom_integer.h"
-void vp10_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t* zbin_ptr,
const int16_t* round_ptr, const int16_t* quant_ptr,
const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
diff --git a/av1/encoder/x86/quantize_ssse3_x86_64.asm b/av1/encoder/x86/quantize_ssse3_x86_64.asm
index b8fefa2..05e0be6 100644
--- a/av1/encoder/x86/quantize_ssse3_x86_64.asm
+++ b/av1/encoder/x86/quantize_ssse3_x86_64.asm
@@ -8,7 +8,7 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vp10
+%define private_prefix av1
%include "third_party/x86inc/x86inc.asm"
diff --git a/av1/encoder/x86/ssim_opt_x86_64.asm b/av1/encoder/x86/ssim_opt_x86_64.asm
index 29659ee..4b5c450 100644
--- a/av1/encoder/x86/ssim_opt_x86_64.asm
+++ b/av1/encoder/x86/ssim_opt_x86_64.asm
@@ -61,8 +61,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
-global sym(vp10_ssim_parms_16x16_sse2) PRIVATE
-sym(vp10_ssim_parms_16x16_sse2):
+global sym(av1_ssim_parms_16x16_sse2) PRIVATE
+sym(av1_ssim_parms_16x16_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
@@ -151,8 +151,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
-global sym(vp10_ssim_parms_8x8_sse2) PRIVATE
-sym(vp10_ssim_parms_8x8_sse2):
+global sym(av1_ssim_parms_8x8_sse2) PRIVATE
+sym(av1_ssim_parms_8x8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
diff --git a/av1/encoder/x86/temporal_filter_apply_sse2.asm b/av1/encoder/x86/temporal_filter_apply_sse2.asm
index eabe575..15de6e8 100644
--- a/av1/encoder/x86/temporal_filter_apply_sse2.asm
+++ b/av1/encoder/x86/temporal_filter_apply_sse2.asm
@@ -11,7 +11,7 @@
%include "aom_ports/x86_abi_support.asm"
-; void vp10_temporal_filter_apply_sse2 | arg
+; void av1_temporal_filter_apply_sse2 | arg
; (unsigned char *frame1, | 0
; unsigned int stride, | 1
; unsigned char *frame2, | 2
@@ -21,8 +21,8 @@
; int filter_weight, | 6
; unsigned int *accumulator, | 7
; unsigned short *count) | 8
-global sym(vp10_temporal_filter_apply_sse2) PRIVATE
-sym(vp10_temporal_filter_apply_sse2):
+global sym(av1_temporal_filter_apply_sse2) PRIVATE
+sym(av1_temporal_filter_apply_sse2):
push rbp
mov rbp, rsp
diff --git a/av1/exports_dec b/av1/exports_dec
index 67f427f..05860e8 100644
--- a/av1/exports_dec
+++ b/av1/exports_dec
@@ -1,2 +1,2 @@
-data aom_codec_vp10_dx_algo
-text aom_codec_vp10_dx
+data aom_codec_av1_dx_algo
+text aom_codec_av1_dx
diff --git a/av1/exports_enc b/av1/exports_enc
index f55fef9..dc4a9ea 100644
--- a/av1/exports_enc
+++ b/av1/exports_enc
@@ -1,2 +1,2 @@
-data aom_codec_vp10_cx_algo
-text aom_codec_vp10_cx
+data aom_codec_av1_cx_algo
+text aom_codec_av1_cx
diff --git a/build/make/Android.mk b/build/make/Android.mk
index 0c229c4..290c974 100644
--- a/build/make/Android.mk
+++ b/build/make/Android.mk
@@ -168,7 +168,7 @@
define rtcd_dep_template
rtcd_dep_template_SRCS := $(addprefix $(LOCAL_PATH)/, $(LOCAL_SRC_FILES))
rtcd_dep_template_SRCS := $$(rtcd_dep_template_SRCS:.neon=)
-ifeq ($(CONFIG_VP10), yes)
+ifeq ($(CONFIG_AV1), yes)
$$(rtcd_dep_template_SRCS): av1_rtcd.h
endif
$$(rtcd_dep_template_SRCS): aom_scale_rtcd.h
diff --git a/configure b/configure
index 17f5b10..3bd2a90 100755
--- a/configure
+++ b/configure
@@ -35,7 +35,7 @@
${toggle_debug_libs} in/exclude debug version of libraries
${toggle_static_msvcrt} use static MSVCRT (VS builds only)
${toggle_aom_highbitdepth} use high bit depth (10/12) profiles
- ${toggle_vp10} VP10 codec support
+ ${toggle_av1} AV1 codec support
${toggle_internal_stats} output of encoder internal stats for debug, if supported (encoders)
${toggle_multithread} multithreaded encoding and decoding
${toggle_spatial_resampling} spatial sampling (scaling) support
@@ -185,7 +185,7 @@
fi
# disable codecs when their source directory does not exist
-[ -d "${source_path}/av1" ] || disable_feature vp10
+[ -d "${source_path}/av1" ] || disable_feature av1
# install everything except the sources, by default. sources will have
# to be enabled when doing dist builds, since that's no longer a common
@@ -203,11 +203,11 @@
enable_feature temporal_denoising
CODECS="
- vp10_encoder
- vp10_decoder
+ av1_encoder
+ av1_decoder
"
CODEC_FAMILIES="
- vp10
+ av1
"
ARCH_LIST="
diff --git a/examples.mk b/examples.mk
index c3c203e..9c3fb3d 100644
--- a/examples.mk
+++ b/examples.mk
@@ -168,10 +168,10 @@
# We should not link to math library (libm) on RVCT
# when building for bare-metal targets
ifeq ($(CONFIG_OS_SUPPORT), yes)
-CODEC_EXTRA_LIBS-$(CONFIG_VP10) += m
+CODEC_EXTRA_LIBS-$(CONFIG_AV1) += m
else
ifeq ($(CONFIG_GCC), yes)
- CODEC_EXTRA_LIBS-$(CONFIG_VP10) += m
+ CODEC_EXTRA_LIBS-$(CONFIG_AV1) += m
endif
endif
#
@@ -188,8 +188,8 @@
INC_PATH-yes := $(SRC_PATH_BARE)/../include
else
LIB_PATH-yes += $(if $(BUILD_PFX),$(BUILD_PFX),.)
- INC_PATH-$(CONFIG_VP10_DECODER) += $(SRC_PATH_BARE)/av1
- INC_PATH-$(CONFIG_VP10_ENCODER) += $(SRC_PATH_BARE)/av1
+ INC_PATH-$(CONFIG_AV1_DECODER) += $(SRC_PATH_BARE)/av1
+ INC_PATH-$(CONFIG_AV1_ENCODER) += $(SRC_PATH_BARE)/av1
endif
INC_PATH-$(CONFIG_LIBYUV) += $(SRC_PATH_BARE)/third_party/libyuv/include
LIB_PATH := $(call enabled,LIB_PATH)
diff --git a/examples/aom_temporal_svc_encoder.c b/examples/aom_temporal_svc_encoder.c
index 3e473d7..bac8fb4 100644
--- a/examples/aom_temporal_svc_encoder.c
+++ b/examples/aom_temporal_svc_encoder.c
@@ -491,13 +491,13 @@
struct RateControlMetrics rc;
int64_t cx_time = 0;
const int min_args_base = 11;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
aom_bit_depth_t bit_depth = VPX_BITS_8;
int input_bit_depth = 8;
const int min_args = min_args_base + 1;
#else
const int min_args = min_args_base;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
double sum_bitrate = 0.0;
double sum_bitrate2 = 0.0;
double framerate = 30.0;
@@ -505,7 +505,7 @@
exec_name = argv[0];
// Check usage and arguments.
if (argc < min_args) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
die(
"Usage: %s <infile> <outfile> <codec_type(vp8/vp9)> <width> <height> "
"<rate_num> <rate_den> <speed> <frame_drop_threshold> <mode> "
@@ -517,7 +517,7 @@
"<rate_num> <rate_den> <speed> <frame_drop_threshold> <mode> "
"<Rate_0> ... <Rate_nlayers-1> \n",
argv[0]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
encoder = get_aom_encoder_by_name(argv[3]);
@@ -540,7 +540,7 @@
die("Invalid number of arguments");
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (strtol(argv[argc - 1], NULL, 0)) {
case 8:
bit_depth = VPX_BITS_8;
@@ -565,7 +565,7 @@
if (!aom_img_alloc(&raw, VPX_IMG_FMT_I420, width, height, 32)) {
die("Failed to allocate image", width, height);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Populate encoder configuration.
res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
@@ -578,13 +578,13 @@
cfg.g_w = width;
cfg.g_h = height;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (bit_depth != VPX_BITS_8) {
cfg.g_bit_depth = bit_depth;
cfg.g_input_bit_depth = input_bit_depth;
cfg.g_profile = 2;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// Timebase format e.g. 30fps: numerator=1, demoninator = 30.
cfg.g_timebase.num = strtol(argv[6], NULL, 0);
@@ -667,13 +667,13 @@
cfg.ss_number_layers = 1;
// Initialize codec.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (aom_codec_enc_init(
&codec, encoder->codec_interface(), &cfg,
bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH))
#else
if (aom_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
die_codec(&codec, "Failed to initialize encoder");
if (strncmp(encoder->name, "vp8", 3) == 0) {
diff --git a/libs.mk b/libs.mk
index bee9204..194d8ea 100644
--- a/libs.mk
+++ b/libs.mk
@@ -53,39 +53,39 @@
include $(SRC_PATH_BARE)/aom_util/aom_util.mk
CODEC_SRCS-yes += $(addprefix aom_util/,$(call enabled,UTIL_SRCS))
-# VP10 make file
-ifeq ($(CONFIG_VP10),yes)
- VP10_PREFIX=av1/
- include $(SRC_PATH_BARE)/$(VP10_PREFIX)av1_common.mk
+# AV1 make file
+ifeq ($(CONFIG_AV1),yes)
+ AV1_PREFIX=av1/
+ include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_common.mk
endif
-ifeq ($(CONFIG_VP10_ENCODER),yes)
- VP10_PREFIX=av1/
- include $(SRC_PATH_BARE)/$(VP10_PREFIX)av1_cx.mk
- CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_CX_SRCS))
- CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_CX_EXPORTS))
- CODEC_SRCS-yes += $(VP10_PREFIX)av1_cx.mk aom/vp8.h aom/vp8cx.h
+ifeq ($(CONFIG_AV1_ENCODER),yes)
+ AV1_PREFIX=av1/
+ include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_cx.mk
+ CODEC_SRCS-yes += $(addprefix $(AV1_PREFIX),$(call enabled,AV1_CX_SRCS))
+ CODEC_EXPORTS-yes += $(addprefix $(AV1_PREFIX),$(AV1_CX_EXPORTS))
+ CODEC_SRCS-yes += $(AV1_PREFIX)av1_cx.mk aom/vp8.h aom/vp8cx.h
INSTALL-LIBS-yes += include/aom/vp8.h include/aom/vp8cx.h
INSTALL-LIBS-$(CONFIG_SPATIAL_SVC) += include/aom/svc_context.h
- INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
+ INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(AV1_PREFIX)/%
CODEC_DOC_SRCS += aom/vp8.h aom/vp8cx.h
- CODEC_DOC_SECTIONS += vp10 vp10_encoder
+ CODEC_DOC_SECTIONS += av1 av1_encoder
endif
-ifeq ($(CONFIG_VP10_DECODER),yes)
- VP10_PREFIX=av1/
- include $(SRC_PATH_BARE)/$(VP10_PREFIX)av1_dx.mk
- CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_DX_SRCS))
- CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_DX_EXPORTS))
- CODEC_SRCS-yes += $(VP10_PREFIX)av1_dx.mk aom/vp8.h aom/vp8dx.h
+ifeq ($(CONFIG_AV1_DECODER),yes)
+ AV1_PREFIX=av1/
+ include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_dx.mk
+ CODEC_SRCS-yes += $(addprefix $(AV1_PREFIX),$(call enabled,AV1_DX_SRCS))
+ CODEC_EXPORTS-yes += $(addprefix $(AV1_PREFIX),$(AV1_DX_EXPORTS))
+ CODEC_SRCS-yes += $(AV1_PREFIX)av1_dx.mk aom/vp8.h aom/vp8dx.h
INSTALL-LIBS-yes += include/aom/vp8.h include/aom/vp8dx.h
- INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
+ INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(AV1_PREFIX)/%
CODEC_DOC_SRCS += aom/vp8.h aom/vp8dx.h
- CODEC_DOC_SECTIONS += vp10 vp10_decoder
+ CODEC_DOC_SECTIONS += av1 av1_decoder
endif
-VP10_PREFIX=av1/
-$(BUILD_PFX)$(VP10_PREFIX)%.c.o: CFLAGS += -Wextra
+AV1_PREFIX=av1/
+$(BUILD_PFX)$(AV1_PREFIX)%.c.o: CFLAGS += -Wextra
ifeq ($(CONFIG_ENCODERS),yes)
CODEC_DOC_SECTIONS += encoder
diff --git a/test/active_map_refresh_test.cc b/test/active_map_refresh_test.cc
index a95c664..f53ffc8 100644
--- a/test/active_map_refresh_test.cc
+++ b/test/active_map_refresh_test.cc
@@ -121,7 +121,7 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
+AV1_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
::testing::Values(::libaom_test::kRealTime),
::testing::Range(5, 6));
} // namespace
diff --git a/test/active_map_test.cc b/test/active_map_test.cc
index d633dc3..d371f3d 100644
--- a/test/active_map_test.cc
+++ b/test/active_map_test.cc
@@ -79,7 +79,7 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(ActiveMapTest,
+AV1_INSTANTIATE_TEST_CASE(ActiveMapTest,
::testing::Values(::libaom_test::kRealTime),
::testing::Range(0, 6));
} // namespace
diff --git a/test/aq_segment_test.cc b/test/aq_segment_test.cc
index e7b8ade..25863aa 100644
--- a/test/aq_segment_test.cc
+++ b/test/aq_segment_test.cc
@@ -102,7 +102,7 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(AqSegmentTest,
+AV1_INSTANTIATE_TEST_CASE(AqSegmentTest,
::testing::Values(::libaom_test::kRealTime,
::libaom_test::kOnePassGood),
::testing::Range(3, 9));
diff --git a/test/arf_freq_test.cc b/test/arf_freq_test.cc
index 9ba1e3b..90d0d09 100644
--- a/test/arf_freq_test.cc
+++ b/test/arf_freq_test.cc
@@ -49,9 +49,9 @@
{ "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, VPX_IMG_FMT_I420,
VPX_BITS_8, 0 },
{ "rush_hour_444.y4m", 352, 288, 30, 1, 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Add list of profile 2/3 test videos here ...
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
const TestEncodeParam kEncodeVectors[] = {
@@ -62,7 +62,7 @@
const int kMinArfVectors[] = {
// NOTE: 0 refers to the default built-in logic in:
- // vp10_rc_get_default_min_gf_interval(...)
+ // av1_rc_get_default_min_gf_interval(...)
0, 4, 8, 12, 15
};
@@ -164,7 +164,7 @@
if (min_arf_requested_)
return min_arf_requested_;
else
- return vp10_rc_get_default_min_gf_interval(
+ return av1_rc_get_default_min_gf_interval(
test_video_param_.width, test_video_param_.height,
(double)test_video_param_.framerate_num /
test_video_param_.framerate_den);
@@ -210,20 +210,20 @@
delete (video);
}
-#if CONFIG_VPX_HIGHBITDEPTH
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1_ENCODER
// TODO(angiebird): 25-29 fail in high bitdepth mode.
INSTANTIATE_TEST_CASE_P(
- DISABLED_VP10, ArfFreqTest,
+ DISABLED_AV1, ArfFreqTest,
::testing::Combine(
::testing::Values(static_cast<const libaom_test::CodecFactory *>(
- &libaom_test::kVP10)),
+ &libaom_test::kAV1)),
::testing::ValuesIn(kTestVectors), ::testing::ValuesIn(kEncodeVectors),
::testing::ValuesIn(kMinArfVectors)));
-#endif // CONFIG_VP10_ENCODER
+#endif // CONFIG_AV1_ENCODER
#else
-VP10_INSTANTIATE_TEST_CASE(ArfFreqTest, ::testing::ValuesIn(kTestVectors),
+AV1_INSTANTIATE_TEST_CASE(ArfFreqTest, ::testing::ValuesIn(kTestVectors),
::testing::ValuesIn(kEncodeVectors),
::testing::ValuesIn(kMinArfVectors));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} // namespace
diff --git a/test/av1_dct_test.cc b/test/av1_dct_test.cc
index 6ab6a90..fa122c7 100644
--- a/test/av1_dct_test.cc
+++ b/test/av1_dct_test.cc
@@ -84,7 +84,7 @@
};
typedef std::tr1::tuple<FdctFunc, FdctFuncRef, int, int> FdctParam;
-class Vp10FwdTxfm : public TransTestBase,
+class AV1FwdTxfm : public TransTestBase,
public ::testing::TestWithParam<FdctParam> {
public:
virtual void SetUp() {
@@ -96,10 +96,10 @@
virtual void TearDown() {}
};
-TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
+TEST_P(AV1FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
INSTANTIATE_TEST_CASE_P(
- C, Vp10FwdTxfm,
+ C, AV1FwdTxfm,
::testing::Values(FdctParam(&fdct4, &reference_dct_1d, 4, 1),
FdctParam(&fdct8, &reference_dct_1d, 8, 1),
FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
diff --git a/test/av1_inv_txfm_test.cc b/test/av1_inv_txfm_test.cc
index a2731c7..64d7771 100644
--- a/test/av1_inv_txfm_test.cc
+++ b/test/av1_inv_txfm_test.cc
@@ -88,7 +88,7 @@
};
typedef std::tr1::tuple<IdctFunc, IdctFuncRef, int, int> IdctParam;
-class Vp10InvTxfm : public TransTestBase,
+class AV1InvTxfm : public TransTestBase,
public ::testing::TestWithParam<IdctParam> {
public:
virtual void SetUp() {
@@ -100,24 +100,24 @@
virtual void TearDown() {}
};
-TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
+TEST_P(AV1InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
INSTANTIATE_TEST_CASE_P(
- C, Vp10InvTxfm,
- ::testing::Values(IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
- IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
- IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
- IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6)));
+ C, AV1InvTxfm,
+ ::testing::Values(IdctParam(&av1_idct4_c, &reference_idct_1d, 4, 1),
+ IdctParam(&av1_idct8_c, &reference_idct_1d, 8, 2),
+ IdctParam(&av1_idct16_c, &reference_idct_1d, 16, 4),
+ IdctParam(&av1_idct32_c, &reference_idct_1d, 32, 6)));
typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
PartialInvTxfmParam;
const int kMaxNumCoeffs = 1024;
-class Vp10PartialIDctTest
+class AV1PartialIDctTest
: public ::testing::TestWithParam<PartialInvTxfmParam> {
public:
- virtual ~Vp10PartialIDctTest() {}
+ virtual ~AV1PartialIDctTest() {}
virtual void SetUp() {
ftxfm_ = GET_PARAM(0);
full_itxfm_ = GET_PARAM(1);
@@ -136,7 +136,7 @@
InvTxfmFunc partial_itxfm_;
};
-TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
+TEST_P(AV1PartialIDctTest, RunQuantCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
switch (tx_size_) {
@@ -184,7 +184,7 @@
// quantization with maximum allowed step sizes
test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
for (int j = 1; j < last_nonzero_; ++j)
- test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+ test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] =
(output_ref_block[j] / 1828) * 1828;
}
@@ -202,7 +202,7 @@
<< "Error: partial inverse transform produces different results";
}
-TEST_P(Vp10PartialIDctTest, ResultsMatch) {
+TEST_P(AV1PartialIDctTest, ResultsMatch) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
switch (tx_size_) {
@@ -235,7 +235,7 @@
max_energy_leftover = 0;
coef = 0;
}
- test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] = coef;
+ test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] = coef;
}
memcpy(test_coef_block2, test_coef_block1,
@@ -257,19 +257,19 @@
using std::tr1::make_tuple;
INSTANTIATE_TEST_CASE_P(
- C, Vp10PartialIDctTest,
- ::testing::Values(make_tuple(&aom_fdct32x32_c, &vp10_idct32x32_1024_add_c,
- &vp10_idct32x32_34_add_c, TX_32X32, 34),
- make_tuple(&aom_fdct32x32_c, &vp10_idct32x32_1024_add_c,
- &vp10_idct32x32_1_add_c, TX_32X32, 1),
- make_tuple(&aom_fdct16x16_c, &vp10_idct16x16_256_add_c,
- &vp10_idct16x16_10_add_c, TX_16X16, 10),
- make_tuple(&aom_fdct16x16_c, &vp10_idct16x16_256_add_c,
- &vp10_idct16x16_1_add_c, TX_16X16, 1),
- make_tuple(&aom_fdct8x8_c, &vp10_idct8x8_64_add_c,
- &vp10_idct8x8_12_add_c, TX_8X8, 12),
- make_tuple(&aom_fdct8x8_c, &vp10_idct8x8_64_add_c,
- &vp10_idct8x8_1_add_c, TX_8X8, 1),
- make_tuple(&aom_fdct4x4_c, &vp10_idct4x4_16_add_c,
- &vp10_idct4x4_1_add_c, TX_4X4, 1)));
+ C, AV1PartialIDctTest,
+ ::testing::Values(make_tuple(&aom_fdct32x32_c, &av1_idct32x32_1024_add_c,
+ &av1_idct32x32_34_add_c, TX_32X32, 34),
+ make_tuple(&aom_fdct32x32_c, &av1_idct32x32_1024_add_c,
+ &av1_idct32x32_1_add_c, TX_32X32, 1),
+ make_tuple(&aom_fdct16x16_c, &av1_idct16x16_256_add_c,
+ &av1_idct16x16_10_add_c, TX_16X16, 10),
+ make_tuple(&aom_fdct16x16_c, &av1_idct16x16_256_add_c,
+ &av1_idct16x16_1_add_c, TX_16X16, 1),
+ make_tuple(&aom_fdct8x8_c, &av1_idct8x8_64_add_c,
+ &av1_idct8x8_12_add_c, TX_8X8, 12),
+ make_tuple(&aom_fdct8x8_c, &av1_idct8x8_64_add_c,
+ &av1_idct8x8_1_add_c, TX_8X8, 1),
+ make_tuple(&aom_fdct4x4_c, &av1_idct4x4_16_add_c,
+ &av1_idct4x4_1_add_c, TX_4X4, 1)));
} // namespace
diff --git a/test/boolcoder_test.cc b/test/boolcoder_test.cc
index 18d5c02..b376a09 100644
--- a/test/boolcoder_test.cc
+++ b/test/boolcoder_test.cc
@@ -25,7 +25,7 @@
const int num_tests = 10;
} // namespace
-TEST(VP10, TestBitIO) {
+TEST(AV1, TestBitIO) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
for (int n = 0; n < num_tests; ++n) {
for (int method = 0; method <= 7; ++method) { // we generate various proba
diff --git a/test/borders_test.cc b/test/borders_test.cc
index 7fad160..95c87fb 100644
--- a/test/borders_test.cc
+++ b/test/borders_test.cc
@@ -79,6 +79,6 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(BordersTest,
+AV1_INSTANTIATE_TEST_CASE(BordersTest,
::testing::Values(::libaom_test::kTwoPassGood));
} // namespace
diff --git a/test/codec_factory.h b/test/codec_factory.h
index db65ac3..3306ce7 100644
--- a/test/codec_factory.h
+++ b/test/codec_factory.h
@@ -14,10 +14,10 @@
#include "./aom_config.h"
#include "aom/aom_decoder.h"
#include "aom/aom_encoder.h"
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
#include "aom/vp8cx.h"
#endif
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
#include "aom/vp8dx.h"
#endif
@@ -70,47 +70,47 @@
std::tr1::tuple<const libaom_test::CodecFactory*, T1, T2, T3> > {};
/*
- * VP10 Codec Definitions
+ * AV1 Codec Definitions
*/
-#if CONFIG_VP10
-class VP10Decoder : public Decoder {
+#if CONFIG_AV1
+class AV1Decoder : public Decoder {
public:
- VP10Decoder(aom_codec_dec_cfg_t cfg, unsigned long deadline)
+ AV1Decoder(aom_codec_dec_cfg_t cfg, unsigned long deadline)
: Decoder(cfg, deadline) {}
- VP10Decoder(aom_codec_dec_cfg_t cfg, const aom_codec_flags_t flag,
+ AV1Decoder(aom_codec_dec_cfg_t cfg, const aom_codec_flags_t flag,
unsigned long deadline) // NOLINT
: Decoder(cfg, flag, deadline) {}
protected:
virtual aom_codec_iface_t* CodecInterface() const {
-#if CONFIG_VP10_DECODER
- return &aom_codec_vp10_dx_algo;
+#if CONFIG_AV1_DECODER
+ return &aom_codec_av1_dx_algo;
#else
return NULL;
#endif
}
};
-class VP10Encoder : public Encoder {
+class AV1Encoder : public Encoder {
public:
- VP10Encoder(aom_codec_enc_cfg_t cfg, unsigned long deadline,
+ AV1Encoder(aom_codec_enc_cfg_t cfg, unsigned long deadline,
const unsigned long init_flags, TwopassStatsStore* stats)
: Encoder(cfg, deadline, init_flags, stats) {}
protected:
virtual aom_codec_iface_t* CodecInterface() const {
-#if CONFIG_VP10_ENCODER
- return &aom_codec_vp10_cx_algo;
+#if CONFIG_AV1_ENCODER
+ return &aom_codec_av1_cx_algo;
#else
return NULL;
#endif
}
};
-class VP10CodecFactory : public CodecFactory {
+class AV1CodecFactory : public CodecFactory {
public:
- VP10CodecFactory() : CodecFactory() {}
+ AV1CodecFactory() : CodecFactory() {}
virtual Decoder* CreateDecoder(aom_codec_dec_cfg_t cfg,
unsigned long deadline) const {
@@ -120,8 +120,8 @@
virtual Decoder* CreateDecoder(aom_codec_dec_cfg_t cfg,
const aom_codec_flags_t flags,
unsigned long deadline) const { // NOLINT
-#if CONFIG_VP10_DECODER
- return new VP10Decoder(cfg, flags, deadline);
+#if CONFIG_AV1_DECODER
+ return new AV1Decoder(cfg, flags, deadline);
#else
return NULL;
#endif
@@ -131,8 +131,8 @@
unsigned long deadline,
const unsigned long init_flags,
TwopassStatsStore* stats) const {
-#if CONFIG_VP10_ENCODER
- return new VP10Encoder(cfg, deadline, init_flags, stats);
+#if CONFIG_AV1_ENCODER
+ return new AV1Encoder(cfg, deadline, init_flags, stats);
#else
return NULL;
#endif
@@ -140,26 +140,26 @@
virtual aom_codec_err_t DefaultEncoderConfig(aom_codec_enc_cfg_t* cfg,
int usage) const {
-#if CONFIG_VP10_ENCODER
- return aom_codec_enc_config_default(&aom_codec_vp10_cx_algo, cfg, usage);
+#if CONFIG_AV1_ENCODER
+ return aom_codec_enc_config_default(&aom_codec_av1_cx_algo, cfg, usage);
#else
return VPX_CODEC_INCAPABLE;
#endif
}
};
-const libaom_test::VP10CodecFactory kVP10;
+const libaom_test::AV1CodecFactory kAV1;
-#define VP10_INSTANTIATE_TEST_CASE(test, ...) \
+#define AV1_INSTANTIATE_TEST_CASE(test, ...) \
INSTANTIATE_TEST_CASE_P( \
- VP10, test, \
+ AV1, test, \
::testing::Combine( \
::testing::Values(static_cast<const libaom_test::CodecFactory*>( \
- &libaom_test::kVP10)), \
+ &libaom_test::kAV1)), \
__VA_ARGS__))
#else
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)
-#endif // CONFIG_VP10
+#define AV1_INSTANTIATE_TEST_CASE(test, ...)
+#endif // CONFIG_AV1
} // namespace libaom_test
#endif // TEST_CODEC_FACTORY_H_
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 004e2e2..3a735b2 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -165,7 +165,7 @@
block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width, output_height);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter, const int16_t *VFilter,
@@ -266,7 +266,7 @@
highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width,
output_height, bd);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
@@ -279,7 +279,7 @@
aom_memalign(kDataAlignment, kOutputBufferSize));
output_ref_ = reinterpret_cast<uint8_t *>(
aom_memalign(kDataAlignment, kOutputBufferSize));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
input16_ = reinterpret_cast<uint16_t *>(aom_memalign(
kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) +
1;
@@ -299,7 +299,7 @@
output_ = NULL;
aom_free(output_ref_);
output_ref_ = NULL;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
aom_free(input16_ - 1);
input16_ = NULL;
aom_free(output16_);
@@ -334,7 +334,7 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ != 0)
mask_ = (1 << UUT_->use_highbd_) - 1;
else
@@ -352,12 +352,12 @@
for (int i = 0; i < kInputBufferSize; ++i) {
if (i & 1) {
input_[i] = 255;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
input16_[i] = mask_;
#endif
} else {
input_[i] = prng.Rand8Extremes();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
input16_[i] = prng.Rand16() & mask_;
#endif
}
@@ -366,14 +366,14 @@
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
aom_memset16(input16_, value, kInputBufferSize);
#endif
}
void CopyOutputToRef() {
memcpy(output_ref_, output_, kOutputBufferSize);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
memcpy(output16_ref_, output16_, kOutputBufferSize);
#endif
}
@@ -385,7 +385,7 @@
}
uint8_t *input() const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
@@ -398,7 +398,7 @@
}
uint8_t *output() const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
@@ -411,7 +411,7 @@
}
uint8_t *output_ref() const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
@@ -424,7 +424,7 @@
}
uint16_t lookup(uint8_t *list, int index) const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return list[index];
} else {
@@ -436,7 +436,7 @@
}
void assign_val(uint8_t *list, int index, uint16_t val) const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
list[index] = (uint8_t)val;
} else {
@@ -452,7 +452,7 @@
const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr,
unsigned int dst_stride, unsigned int output_width,
unsigned int output_height) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
dst_stride, output_width, output_height);
@@ -475,7 +475,7 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
dst_stride, output_width, output_height);
@@ -495,7 +495,7 @@
static uint8_t *input_;
static uint8_t *output_;
static uint8_t *output_ref_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static uint16_t *input16_;
static uint16_t *output16_;
static uint16_t *output16_ref_;
@@ -506,7 +506,7 @@
uint8_t *ConvolveTest::input_ = NULL;
uint8_t *ConvolveTest::output_ = NULL;
uint8_t *ConvolveTest::output_ref_ = NULL;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t *ConvolveTest::input16_ = NULL;
uint16_t *ConvolveTest::output16_ = NULL;
uint16_t *ConvolveTest::output16_ref_ = NULL;
@@ -613,7 +613,7 @@
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
const InterpKernel *filters =
- vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -636,7 +636,7 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t *const in = input();
uint8_t *const out = output();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint8_t ref8[kOutputStride * kMaxDimension];
uint16_t ref16[kOutputStride * kMaxDimension];
uint8_t *ref;
@@ -651,7 +651,7 @@
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
const InterpKernel *filters =
- vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
@@ -693,7 +693,7 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t *const in = input();
uint8_t *const out = output();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint8_t ref8[kOutputStride * kMaxDimension];
uint16_t ref16[kOutputStride * kMaxDimension];
uint8_t *ref;
@@ -711,7 +711,7 @@
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
uint16_t r;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
r = prng.Rand8Extremes();
} else {
@@ -728,7 +728,7 @@
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
const InterpKernel *filters =
- vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
@@ -770,7 +770,7 @@
TEST_P(ConvolveTest, FilterExtremes) {
uint8_t *const in = input();
uint8_t *const out = output();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint8_t ref8[kOutputStride * kMaxDimension];
uint16_t ref16[kOutputStride * kMaxDimension];
uint8_t *ref;
@@ -788,7 +788,7 @@
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
uint16_t r;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
r = prng.Rand8Extremes();
} else {
@@ -807,7 +807,7 @@
while (seed_val < 256) {
for (int y = 0; y < 8; ++y) {
for (int x = 0; x < 8; ++x) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
((seed_val >> (axis ? y : x)) & 1) * mask_);
#else
@@ -825,7 +825,7 @@
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
const InterpKernel *filters =
- vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
@@ -867,7 +867,7 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t *const in = input();
uint8_t *const out = output();
- const InterpKernel *const eighttap = vp10_filter_kernels[EIGHTTAP];
+ const InterpKernel *const eighttap = av1_filter_kernels[EIGHTTAP];
SetConstantInput(127);
@@ -894,7 +894,7 @@
using std::tr1::make_tuple;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define WRAP(func, bd) \
void wrap_##func##_##bd( \
const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
@@ -1032,7 +1032,7 @@
#endif
#if HAVE_SSE2 && ARCH_X86_64
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
#if CONFIG_USE_X86INC
wrap_convolve_copy_sse2_8, wrap_convolve_avg_sse2_8,
@@ -1130,7 +1130,7 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
diff --git a/test/cpu_speed_test.cc b/test/cpu_speed_test.cc
index a5339b7..71bb452 100644
--- a/test/cpu_speed_test.cc
+++ b/test/cpu_speed_test.cc
@@ -130,7 +130,7 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(CpuSpeedTest,
+AV1_INSTANTIATE_TEST_CASE(CpuSpeedTest,
::testing::Values(::libaom_test::kTwoPassGood,
::libaom_test::kOnePassGood),
::testing::Range(0, 3));
diff --git a/test/datarate_test.cc b/test/datarate_test.cc
index 2aa7a04..7e1cbba 100644
--- a/test/datarate_test.cc
+++ b/test/datarate_test.cc
@@ -545,7 +545,7 @@
}
// Check basic rate targeting for 2 temporal layers.
-#if 0 // VP10 does not support multiple layers yet
+#if 0 // AV1 does not support multiple layers yet
TEST_P(DatarateTestVP9Large, BasicRateTargeting2TemporalLayers) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
@@ -894,13 +894,13 @@
EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
}
-VP10_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
+AV1_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
::testing::Values(::libaom_test::kOnePassGood,
::libaom_test::kRealTime),
::testing::Range(2, 7));
-/* VP10 does not support multiple layers yet.
-VP10_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
+/* AV1 does not support multiple layers yet.
+AV1_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
::testing::Values(::libaom_test::kRealTime),
::testing::Range(5, 8));
*/
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index e662592..0feb80f 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -253,15 +253,15 @@
}
void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
- vp10_fht16x16_c(in, out, stride, tx_type);
+ av1_fht16x16_c(in, out, stride, tx_type);
}
void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
int tx_type) {
- vp10_iht16x16_256_add_c(in, dest, stride, tx_type);
+ av1_iht16x16_256_add_c(in, dest, stride, tx_type);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
aom_highbd_idct16x16_256_add_c(in, out, stride, 10);
}
@@ -281,11 +281,11 @@
}
void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
- vp10_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+ av1_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
}
void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
- vp10_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+ av1_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
}
void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
@@ -313,7 +313,7 @@
aom_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
}
#endif // HAVE_SSE2
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
class Trans16x16TestBase {
public:
@@ -334,7 +334,7 @@
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
@@ -345,7 +345,7 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
@@ -358,7 +358,7 @@
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -366,7 +366,7 @@
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -445,7 +445,7 @@
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
#endif
@@ -465,7 +465,7 @@
// clear reconstructed pixel buffers
memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
#endif
@@ -477,7 +477,7 @@
if (bit_depth_ == VPX_BITS_8) {
inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
tx_type_);
@@ -487,7 +487,7 @@
}
if (bit_depth_ == VPX_BITS_8) {
for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref[j], dst[j]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]);
#endif
@@ -502,10 +502,10 @@
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
@@ -516,12 +516,12 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
in[j] = src16[j] - dst16[j];
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
@@ -531,20 +531,20 @@
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), 16));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
const uint32_t diff = dst[j] - src[j];
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error) << "Error: 16x16 IDCT has error " << error
<< " at index " << j;
@@ -556,14 +556,14 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 10000;
const int eob = 10;
- const int16_t *scan = vp10_default_scan_orders[TX_16X16].scan;
+ const int16_t *scan = av1_default_scan_orders[TX_16X16].scan;
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < kNumCoeffs; ++j) {
@@ -576,31 +576,31 @@
if (bit_depth_ == VPX_BITS_8) {
dst[j] = 0;
ref[j] = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
dst16[j] = 0;
ref16[j] = 0;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
if (bit_depth_ == VPX_BITS_8) {
ref_txfm(coeff, ref, pitch_);
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
} else {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
#else
const uint32_t diff = dst[j] - ref[j];
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_EQ(0u, error) << "Error: 16x16 IDCT Comparison has error "
<< error << " at index " << j;
@@ -630,7 +630,7 @@
fwd_txfm_ref = fdct16x16_ref;
inv_txfm_ref = idct16x16_ref;
mask_ = (1 << bit_depth_) - 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth_) {
case VPX_BITS_10: inv_txfm_ref = idct16x16_10_ref; break;
case VPX_BITS_12: inv_txfm_ref = idct16x16_12_ref; break;
@@ -682,7 +682,7 @@
fwd_txfm_ref = fht16x16_ref;
inv_txfm_ref = iht16x16_ref;
mask_ = (1 << bit_depth_) - 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (bit_depth_) {
case VPX_BITS_10: inv_txfm_ref = iht16x16_10; break;
case VPX_BITS_12: inv_txfm_ref = iht16x16_12; break;
@@ -750,7 +750,7 @@
using std::tr1::make_tuple;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
@@ -762,59 +762,59 @@
::testing::Values(make_tuple(&aom_fdct16x16_c,
&aom_idct16x16_256_add_c,
0, VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 3, VPX_BITS_8)));
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, VPX_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 3, VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_neon,
0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_sse2,
&aom_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
- ::testing::Values(make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
-#endif // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ ::testing::Values(make_tuple(&av1_fht16x16_sse2,
+ &av1_iht16x16_256_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_sse2,
+ &av1_iht16x16_256_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_sse2,
+ &av1_iht16x16_256_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&av1_fht16x16_sse2,
+ &av1_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
@@ -828,13 +828,13 @@
VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
- ::testing::Values(make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ ::testing::Values(make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
3, VPX_BITS_8)));
// Optimizations take effect at a threshold of 3155, so we use a value close to
// that to test both branches.
@@ -848,22 +848,22 @@
&idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
make_tuple(&idct16x16_12, &idct16x16_256_add_12_sse2,
3167, VPX_BITS_12)));
-#endif // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_msa,
&aom_idct16x16_256_add_msa,
0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
MSA, Trans16x16HT,
- ::testing::Values(make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ ::testing::Values(make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
3, VPX_BITS_8)));
-#endif // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/dct32x32_test.cc b/test/dct32x32_test.cc
index 466076b..92f3278 100644
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -75,7 +75,7 @@
typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, int, aom_bit_depth_t>
Trans32x32Param;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void idct32x32_8(const tran_low_t *in, uint8_t *out, int stride) {
aom_highbd_idct32x32_1024_add_c(in, out, stride, 8);
}
@@ -87,7 +87,7 @@
void idct32x32_12(const tran_low_t *in, uint8_t *out, int stride) {
aom_highbd_idct32x32_1024_add_c(in, out, stride, 12);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
class Trans32x32Test : public ::testing::TestWithParam<Trans32x32Param> {
public:
@@ -120,7 +120,7 @@
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
@@ -132,7 +132,7 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
@@ -144,7 +144,7 @@
ASM_REGISTER_STATE_CHECK(fwd_txfm_(test_input_block, test_temp_block, 32));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
inv_txfm_(test_temp_block, CONVERT_TO_BYTEPTR(dst16), 32));
@@ -152,7 +152,7 @@
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -253,7 +253,7 @@
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
@@ -267,7 +267,7 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
@@ -281,13 +281,13 @@
coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, CONVERT_TO_BYTEPTR(dst16), 32));
#endif
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -302,7 +302,7 @@
using std::tr1::make_tuple;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans32x32Test,
::testing::Values(
@@ -320,27 +320,27 @@
VPX_BITS_8),
make_tuple(&aom_fdct32x32_rd_c, &aom_idct32x32_1024_add_c,
1, VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_neon,
0, VPX_BITS_8),
make_tuple(&aom_fdct32x32_rd_c,
&aom_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
-#endif // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_sse2,
&aom_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
make_tuple(&aom_fdct32x32_rd_sse2,
&aom_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
-#endif // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans32x32Test,
::testing::Values(
@@ -354,23 +354,23 @@
VPX_BITS_8),
make_tuple(&aom_fdct32x32_rd_sse2, &aom_idct32x32_1024_add_c, 1,
VPX_BITS_8)));
-#endif // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_AVX2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_AVX2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
AVX2, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_avx2,
&aom_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
make_tuple(&aom_fdct32x32_rd_avx2,
&aom_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
-#endif // HAVE_AVX2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_AVX2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
MSA, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_msa,
&aom_idct32x32_1024_add_msa, 0, VPX_BITS_8),
make_tuple(&aom_fdct32x32_rd_msa,
&aom_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
-#endif // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/decode_api_test.cc b/test/decode_api_test.cc
index 1fc083c..51e67aa 100644
--- a/test/decode_api_test.cc
+++ b/test/decode_api_test.cc
@@ -21,8 +21,8 @@
TEST(DecodeAPI, InvalidParams) {
static const aom_codec_iface_t *kCodecs[] = {
-#if CONFIG_VP10_DECODER
- &aom_codec_vp10_dx_algo,
+#if CONFIG_AV1_DECODER
+ &aom_codec_av1_dx_algo,
#endif
};
uint8_t buf[1] = { 0 };
diff --git a/test/decode_perf_test.cc b/test/decode_perf_test.cc
index 58dcb4c..bea1dec 100644
--- a/test/decode_perf_test.cc
+++ b/test/decode_perf_test.cc
@@ -255,6 +255,6 @@
printf("}\n");
}
-VP10_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
+AV1_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
::testing::Values(::libaom_test::kTwoPassGood));
} // namespace
diff --git a/test/encode_api_test.cc b/test/encode_api_test.cc
index e01646d..09ee91a 100644
--- a/test/encode_api_test.cc
+++ b/test/encode_api_test.cc
@@ -26,8 +26,8 @@
#if CONFIG_VP9_ENCODER
&aom_codec_vp9_cx_algo,
#endif
-#if CONFIG_VP10_ENCODER
- &aom_codec_vp10_cx_algo,
+#if CONFIG_AV1_ENCODER
+ &aom_codec_av1_cx_algo,
#endif
};
uint8_t buf[1] = { 0 };
diff --git a/test/encode_perf_test.cc b/test/encode_perf_test.cc
index 9b4abee..a78afee 100644
--- a/test/encode_perf_test.cc
+++ b/test/encode_perf_test.cc
@@ -182,6 +182,6 @@
}
}
-VP10_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
+AV1_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
::testing::Values(::libaom_test::kRealTime));
} // namespace
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index 1d658e1..c6d42aa 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -33,9 +33,9 @@
res = aom_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
-#if CONFIG_VP10_ENCODER
- if (CodecInterface() == &aom_codec_vp10_cx_algo) {
- // Default to 1 tile column for VP10.
+#if CONFIG_AV1_ENCODER
+ if (CodecInterface() == &aom_codec_av1_cx_algo) {
+ // Default to 1 tile column for AV1.
const int log2_tile_columns = 0;
res = aom_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
log2_tile_columns);
diff --git a/test/encode_test_driver.h b/test/encode_test_driver.h
index e8d2e9d..a60d6df 100644
--- a/test/encode_test_driver.h
+++ b/test/encode_test_driver.h
@@ -17,7 +17,7 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./aom_config.h"
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
#include "aom/vp8cx.h"
#endif
#include "aom/aom_encoder.h"
@@ -133,7 +133,7 @@
const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
void Control(int ctrl_id, aom_active_map_t *arg) {
const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
diff --git a/test/encoder_parms_get_to_decoder.cc b/test/encoder_parms_get_to_decoder.cc
index 52d15a5..602bbca 100644
--- a/test/encoder_parms_get_to_decoder.cc
+++ b/test/encoder_parms_get_to_decoder.cc
@@ -100,7 +100,7 @@
reinterpret_cast<aom_codec_alg_priv_t *>(vp9_decoder->priv);
FrameWorkerData *const worker_data =
reinterpret_cast<FrameWorkerData *>(priv->frame_workers[0].data1);
- VP10_COMMON *const common = &worker_data->pbi->common;
+ AV1_COMMON *const common = &worker_data->pbi->common;
if (encode_parms.lossless) {
EXPECT_EQ(0, common->base_qindex);
@@ -143,7 +143,7 @@
delete video;
}
-VP10_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
+AV1_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
::testing::ValuesIn(kVP9EncodeParameterSet),
::testing::ValuesIn(kVP9EncodePerfTestVectors));
} // namespace
diff --git a/test/end_to_end_test.cc b/test/end_to_end_test.cc
index 1617f4a..cae8c9a 100644
--- a/test/end_to_end_test.cc
+++ b/test/end_to_end_test.cc
@@ -44,7 +44,7 @@
{ "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1 },
{ "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
{ "park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1 },
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
{ "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2 },
{ "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3 },
{ "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3 },
@@ -53,7 +53,7 @@
{ "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3 },
{ "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3 },
{ "park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3 },
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
// Encoding modes tested
@@ -169,22 +169,22 @@
delete (video);
}
-#if CONFIG_VPX_HIGHBITDEPTH
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1_ENCODER
// TODO(angiebird): many fail in high bitdepth mode.
INSTANTIATE_TEST_CASE_P(
- DISABLED_VP10, EndToEndTestLarge,
+ DISABLED_AV1, EndToEndTestLarge,
::testing::Combine(
::testing::Values(static_cast<const libaom_test::CodecFactory *>(
- &libaom_test::kVP10)),
+ &libaom_test::kAV1)),
::testing::ValuesIn(kEncodingModeVectors),
::testing::ValuesIn(kTestVectors),
::testing::ValuesIn(kCpuUsedVectors)));
-#endif // CONFIG_VP10_ENCODER
+#endif // CONFIG_AV1_ENCODER
#else
-VP10_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
+AV1_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
::testing::ValuesIn(kEncodingModeVectors),
::testing::ValuesIn(kTestVectors),
::testing::ValuesIn(kCpuUsedVectors));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} // namespace
diff --git a/test/error_block_test.cc b/test/error_block_test.cc
index 9eb51ae..7925ae8 100644
--- a/test/error_block_test.cc
+++ b/test/error_block_test.cc
@@ -27,7 +27,7 @@
using libaom_test::ACMRandom;
namespace {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int kNumIterations = 1000;
typedef int64_t (*ErrorBlockFunc)(const tran_low_t *coeff,
@@ -158,51 +158,51 @@
using std::tr1::make_tuple;
#if CONFIG_USE_X86INC
-int64_t wrap_vp10_highbd_block_error_8bit_c(const tran_low_t *coeff,
+int64_t wrap_av1_highbd_block_error_8bit_c(const tran_low_t *coeff,
const tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz,
int bps) {
assert(bps == 8);
- return vp10_highbd_block_error_8bit_c(coeff, dqcoeff, block_size, ssz);
+ return av1_highbd_block_error_8bit_c(coeff, dqcoeff, block_size, ssz);
}
#if HAVE_SSE2
-int64_t wrap_vp10_highbd_block_error_8bit_sse2(const tran_low_t *coeff,
+int64_t wrap_av1_highbd_block_error_8bit_sse2(const tran_low_t *coeff,
const tran_low_t *dqcoeff,
intptr_t block_size,
int64_t *ssz, int bps) {
assert(bps == 8);
- return vp10_highbd_block_error_8bit_sse2(coeff, dqcoeff, block_size, ssz);
+ return av1_highbd_block_error_8bit_sse2(coeff, dqcoeff, block_size, ssz);
}
INSTANTIATE_TEST_CASE_P(
SSE2, ErrorBlockTest,
- ::testing::Values(make_tuple(&vp10_highbd_block_error_sse2,
- &vp10_highbd_block_error_c, VPX_BITS_10),
- make_tuple(&vp10_highbd_block_error_sse2,
- &vp10_highbd_block_error_c, VPX_BITS_12),
- make_tuple(&vp10_highbd_block_error_sse2,
- &vp10_highbd_block_error_c, VPX_BITS_8),
- make_tuple(&wrap_vp10_highbd_block_error_8bit_sse2,
- &wrap_vp10_highbd_block_error_8bit_c,
+ ::testing::Values(make_tuple(&av1_highbd_block_error_sse2,
+ &av1_highbd_block_error_c, VPX_BITS_10),
+ make_tuple(&av1_highbd_block_error_sse2,
+ &av1_highbd_block_error_c, VPX_BITS_12),
+ make_tuple(&av1_highbd_block_error_sse2,
+ &av1_highbd_block_error_c, VPX_BITS_8),
+ make_tuple(&wrap_av1_highbd_block_error_8bit_sse2,
+ &wrap_av1_highbd_block_error_8bit_c,
VPX_BITS_8)));
#endif // HAVE_SSE2
#if HAVE_AVX
-int64_t wrap_vp10_highbd_block_error_8bit_avx(const tran_low_t *coeff,
+int64_t wrap_av1_highbd_block_error_8bit_avx(const tran_low_t *coeff,
const tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz,
int bps) {
assert(bps == 8);
- return vp10_highbd_block_error_8bit_avx(coeff, dqcoeff, block_size, ssz);
+ return av1_highbd_block_error_8bit_avx(coeff, dqcoeff, block_size, ssz);
}
INSTANTIATE_TEST_CASE_P(AVX, ErrorBlockTest,
::testing::Values(make_tuple(
- &wrap_vp10_highbd_block_error_8bit_avx,
- &wrap_vp10_highbd_block_error_8bit_c, VPX_BITS_8)));
+ &wrap_av1_highbd_block_error_8bit_avx,
+ &wrap_av1_highbd_block_error_8bit_c, VPX_BITS_8)));
#endif // HAVE_AVX
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
} // namespace
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index 8f39dff..1b8d595 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -570,7 +570,7 @@
}
}
-// SVC-related tests don't run for VP10 since SVC is not supported.
-VP10_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES,
+// SVC-related tests don't run for AV1 since SVC is not supported.
+AV1_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES,
::testing::Values(false));
} // namespace
diff --git a/test/ethread_test.cc b/test/ethread_test.cc
index 2250361..aae62e9 100644
--- a/test/ethread_test.cc
+++ b/test/ethread_test.cc
@@ -124,7 +124,7 @@
ASSERT_EQ(single_thr_md5, multi_thr_md5);
}
-VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
+AV1_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
::testing::Values(::libaom_test::kTwoPassGood,
::libaom_test::kOnePassGood),
::testing::Range(1, 3));
diff --git a/test/fdct4x4_test.cc b/test/fdct4x4_test.cc
index 0c4cafb..81d0e38 100644
--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -44,14 +44,14 @@
}
void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
- vp10_fht4x4_c(in, out, stride, tx_type);
+ av1_fht4x4_c(in, out, stride, tx_type);
}
void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
- vp10_fwht4x4_c(in, out, stride);
+ av1_fwht4x4_c(in, out, stride);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
aom_highbd_idct4x4_16_add_c(in, out, stride, 10);
}
@@ -61,11 +61,11 @@
}
void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
- vp10_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+ av1_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
}
void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
- vp10_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+ av1_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
}
void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
@@ -85,7 +85,7 @@
aom_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
}
#endif // HAVE_SSE2
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
class Trans4x4TestBase {
public:
@@ -106,7 +106,7 @@
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
@@ -117,7 +117,7 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
@@ -130,7 +130,7 @@
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -138,7 +138,7 @@
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -218,7 +218,7 @@
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
@@ -230,7 +230,7 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
@@ -243,7 +243,7 @@
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -251,7 +251,7 @@
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -381,7 +381,7 @@
TEST_P(Trans4x4WHT, InvAccuracyCheck) { RunInvAccuracyCheck(0); }
using std::tr1::make_tuple;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
@@ -393,82 +393,82 @@
::testing::Values(make_tuple(&aom_fdct4x4_c,
&aom_idct4x4_16_add_c, 0,
VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8)));
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, VPX_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4WHT,
::testing::Values(
- make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
- make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
- make_tuple(&vp10_fwht4x4_c, &aom_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+ make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT,
- ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+ ::testing::Values(make_tuple(&av1_fwht4x4_c,
&aom_iwht4x4_16_add_c, 0,
VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_c,
&aom_idct4x4_16_add_neon,
0, VPX_BITS_8)));
-#endif // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 0, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 1, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 2, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 3, VPX_BITS_8)));
-#endif // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VPX_HIGHBITDEPTH && \
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_AOM_HIGHBITDEPTH && \
!CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(MMX, Trans4x4WHT,
- ::testing::Values(make_tuple(&vp10_fwht4x4_mmx,
+ ::testing::Values(make_tuple(&av1_fwht4x4_mmx,
&aom_iwht4x4_16_add_c, 0,
VPX_BITS_8)));
#endif
-#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && \
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && \
!CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4WHT,
- ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+ ::testing::Values(make_tuple(&av1_fwht4x4_c,
&aom_iwht4x4_16_add_sse2,
0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_sse2,
&aom_idct4x4_16_add_sse2,
@@ -476,14 +476,14 @@
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 3,
VPX_BITS_8)));
-#endif // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
@@ -496,13 +496,13 @@
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8)));
-#endif // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_msa,
&aom_idct4x4_16_add_msa, 0,
@@ -510,9 +510,9 @@
INSTANTIATE_TEST_CASE_P(
MSA, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 0, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 1, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 2, VPX_BITS_8),
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 3, VPX_BITS_8)));
-#endif // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 197f01d..20a9f75 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -81,10 +81,10 @@
}
void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
- vp10_fht8x8_c(in, out, stride, tx_type);
+ av1_fht8x8_c(in, out, stride, tx_type);
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
aom_highbd_idct8x8_64_add_c(in, out, stride, 10);
}
@@ -94,11 +94,11 @@
}
void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
- vp10_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+ av1_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
}
void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
- vp10_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+ av1_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
}
void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
@@ -126,7 +126,7 @@
aom_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
}
#endif // HAVE_SSE2
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
class FwdTrans8x8TestBase {
public:
@@ -211,7 +211,7 @@
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
DECLARE_ALIGNED(16, uint8_t, dst[64]);
DECLARE_ALIGNED(16, uint8_t, src[64]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[64]);
DECLARE_ALIGNED(16, uint16_t, src16[64]);
#endif
@@ -223,7 +223,7 @@
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
@@ -247,7 +247,7 @@
}
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -255,7 +255,7 @@
}
for (int j = 0; j < 64; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -287,7 +287,7 @@
DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
DECLARE_ALIGNED(16, uint8_t, dst[64]);
DECLARE_ALIGNED(16, uint8_t, src[64]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[64]);
DECLARE_ALIGNED(16, uint16_t, src16[64]);
#endif
@@ -307,7 +307,7 @@
dst[j] = rnd.Rand8() % 2 ? 255 : 0;
}
test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
if (i == 0) {
src16[j] = mask_;
@@ -330,7 +330,7 @@
fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -338,7 +338,7 @@
}
for (int j = 0; j < 64; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -373,7 +373,7 @@
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
#endif
@@ -387,7 +387,7 @@
src[j] = rnd.Rand8() % 2 ? 255 : 0;
dst[j] = src[j] > 0 ? 0 : 255;
in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
dst16[j] = src16[j] > 0 ? 0 : mask_;
@@ -402,7 +402,7 @@
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -410,7 +410,7 @@
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
@@ -458,11 +458,11 @@
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
#endif
- const int16_t *scan = vp10_default_scan_orders[TX_8X8].scan;
+ const int16_t *scan = av1_default_scan_orders[TX_8X8].scan;
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < kNumCoeffs; ++j) {
@@ -475,7 +475,7 @@
if (bit_depth_ == VPX_BITS_8) {
dst[j] = 0;
ref[j] = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
dst16[j] = 0;
ref16[j] = 0;
@@ -485,7 +485,7 @@
if (bit_depth_ == VPX_BITS_8) {
ref_txfm(coeff, ref, pitch_);
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
ASM_REGISTER_STATE_CHECK(
@@ -494,7 +494,7 @@
}
for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
#else
@@ -620,7 +620,7 @@
using std::tr1::make_tuple;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
@@ -632,52 +632,52 @@
::testing::Values(make_tuple(&aom_fdct8x8_c,
&aom_idct8x8_64_add_c, 0,
VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
- make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, VPX_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_neon,
&aom_idct8x8_64_add_neon,
0, VPX_BITS_8)));
-#endif // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 0, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 3, VPX_BITS_8)));
-#endif // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_sse2,
&aom_idct8x8_64_add_sse2,
@@ -685,14 +685,14 @@
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3,
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 3,
VPX_BITS_8)));
-#endif // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_sse2, &aom_idct8x8_64_add_c, 0,
@@ -709,10 +709,10 @@
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 3, VPX_BITS_8)));
// Optimizations take effect at a threshold of 6201, so we use a value close to
// that to test both branches.
@@ -725,17 +725,17 @@
make_tuple(&idct8x8_10_add_12_c, &idct8x8_10_add_12_sse2, 6225,
VPX_BITS_12),
make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
-#endif // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
- !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_ssse3,
&aom_idct8x8_64_add_ssse3,
0, VPX_BITS_8)));
#endif
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_msa,
&aom_idct8x8_64_add_msa, 0,
@@ -743,9 +743,9 @@
INSTANTIATE_TEST_CASE_P(
MSA, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 0, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 3, VPX_BITS_8)));
-#endif // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 0, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 1, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 2, VPX_BITS_8),
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/frame_size_tests.cc b/test/frame_size_tests.cc
index c1d916f..df49a2c 100644
--- a/test/frame_size_tests.cc
+++ b/test/frame_size_tests.cc
@@ -17,7 +17,7 @@
public ::testing::Test {
protected:
VP9FrameSizeTestsLarge()
- : EncoderTest(&::libaom_test::kVP10), expected_res_(VPX_CODEC_OK) {}
+ : EncoderTest(&::libaom_test::kAV1), expected_res_(VPX_CODEC_OK) {}
virtual ~VP9FrameSizeTestsLarge() {}
virtual void SetUp() {
diff --git a/test/intrapred_test.cc b/test/intrapred_test.cc
index 545098a..0efd0f2 100644
--- a/test/intrapred_test.cc
+++ b/test/intrapred_test.cc
@@ -128,7 +128,7 @@
using std::tr1::make_tuple;
#if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_USE_X86INC
INSTANTIATE_TEST_CASE_P(
SSE2_TO_C_8, VP9IntraPredTest,
@@ -212,6 +212,6 @@
&aom_highbd_tm_predictor_8x8_c, 8, 12)));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // HAVE_SSE2
} // namespace
diff --git a/test/lossless_test.cc b/test/lossless_test.cc
index 316767e..53895e9 100644
--- a/test/lossless_test.cc
+++ b/test/lossless_test.cc
@@ -118,7 +118,7 @@
EXPECT_GE(psnr_lossless, kMaxPsnr);
}
-VP10_INSTANTIATE_TEST_CASE(LosslessTest,
+AV1_INSTANTIATE_TEST_CASE(LosslessTest,
::testing::Values(::libaom_test::kOnePassGood,
::libaom_test::kTwoPassGood));
} // namespace
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index 594eae9..5a41c1b 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -34,7 +34,7 @@
const int number_of_iterations = 10000;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int count, int bd);
@@ -50,13 +50,13 @@
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
typedef std::tr1::tuple<loop_op_t, loop_op_t, int, int> loop8_param_t;
typedef std::tr1::tuple<dual_loop_op_t, dual_loop_op_t, int> dualloop8_param_t;
#if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void wrapper_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int count, int bd) {
@@ -104,11 +104,11 @@
int count) {
aom_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // HAVE_SSE2
#if HAVE_NEON_ASM
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// No neon high bitdepth functions.
#else
void wrapper_vertical_16_neon(uint8_t *s, int p, const uint8_t *blimit,
@@ -134,10 +134,10 @@
int count) {
aom_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh);
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // HAVE_NEON_ASM
-#if HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#if HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
void wrapper_vertical_16_msa(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int count) {
@@ -149,7 +149,7 @@
int count) {
aom_lpf_vertical_16_c(s, p, blimit, limit, thresh);
}
-#endif // HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#endif // HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
class Loop8Test6Param : public ::testing::TestWithParam<loop8_param_t> {
public:
@@ -194,14 +194,14 @@
TEST_P(Loop8Test6Param, OperationCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
int32_t bd = bit_depth_;
DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
#else
DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
@@ -249,7 +249,7 @@
}
ref_s[j] = s[j];
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_, bd);
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd));
@@ -257,7 +257,7 @@
ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_);
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int j = 0; j < kNumCoeffs; ++j) {
err_count += ref_s[j] != s[j];
@@ -276,14 +276,14 @@
TEST_P(Loop8Test6Param, ValueCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int32_t bd = bit_depth_;
DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
#else
DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
@@ -318,7 +318,7 @@
s[j] = rnd.Rand16() & mask_;
ref_s[j] = s[j];
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_, bd);
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd));
@@ -326,7 +326,7 @@
ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_);
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int j = 0; j < kNumCoeffs; ++j) {
err_count += ref_s[j] != s[j];
}
@@ -344,14 +344,14 @@
TEST_P(Loop8Test9Param, OperationCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int32_t bd = bit_depth_;
DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
#else
DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
@@ -411,7 +411,7 @@
}
ref_s[j] = s[j];
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
limit1, thresh1, bd);
ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
@@ -422,7 +422,7 @@
limit1, thresh1);
ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
thresh0, blimit1, limit1, thresh1));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int j = 0; j < kNumCoeffs; ++j) {
err_count += ref_s[j] != s[j];
}
@@ -440,13 +440,13 @@
TEST_P(Loop8Test9Param, ValueCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
#else
DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
@@ -480,7 +480,7 @@
s[j] = rnd.Rand16() & mask_;
ref_s[j] = s[j];
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int32_t bd = bit_depth_;
ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
limit1, thresh1, bd);
@@ -492,7 +492,7 @@
limit1, thresh1);
ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
thresh0, blimit1, limit1, thresh1));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int j = 0; j < kNumCoeffs; ++j) {
err_count += ref_s[j] != s[j];
}
@@ -510,7 +510,7 @@
using std::tr1::make_tuple;
#if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, Loop8Test6Param,
::testing::Values(
@@ -570,10 +570,10 @@
make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1),
make_tuple(&wrapper_vertical_16_dual_sse2, &wrapper_vertical_16_dual_c,
8, 1)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif
-#if HAVE_AVX2 && (!CONFIG_VPX_HIGHBITDEPTH)
+#if HAVE_AVX2 && (!CONFIG_AOM_HIGHBITDEPTH)
INSTANTIATE_TEST_CASE_P(
AVX2, Loop8Test6Param,
::testing::Values(make_tuple(&aom_lpf_horizontal_16_avx2,
@@ -583,7 +583,7 @@
#endif
#if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, Loop8Test9Param,
::testing::Values(make_tuple(&aom_highbd_lpf_horizontal_4_dual_sse2,
@@ -621,11 +621,11 @@
&aom_lpf_vertical_4_dual_c, 8),
make_tuple(&aom_lpf_vertical_8_dual_sse2,
&aom_lpf_vertical_8_dual_c, 8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif
#if HAVE_NEON
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// No neon high bitdepth functions.
#else
INSTANTIATE_TEST_CASE_P(
@@ -657,10 +657,10 @@
&aom_lpf_horizontal_4_dual_c, 8),
make_tuple(&aom_lpf_vertical_4_dual_neon,
&aom_lpf_vertical_4_dual_c, 8)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // HAVE_NEON
-#if HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#if HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
INSTANTIATE_TEST_CASE_P(
MSA, Loop8Test6Param,
::testing::Values(
@@ -680,6 +680,6 @@
&aom_lpf_vertical_4_dual_c, 8),
make_tuple(&aom_lpf_vertical_8_dual_msa,
&aom_lpf_vertical_8_dual_c, 8)));
-#endif // HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#endif // HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
} // namespace
diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index a62afc0..94631f6 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -101,7 +101,7 @@
// quantization with maximum allowed step sizes
test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
for (int j = 1; j < last_nonzero_; ++j)
- test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+ test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] =
(output_ref_block[j] / 1828) * 1828;
}
@@ -152,7 +152,7 @@
max_energy_leftover = 0;
coef = 0;
}
- test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] = coef;
+ test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] = coef;
}
memcpy(test_coef_block2, test_coef_block1,
@@ -190,7 +190,7 @@
make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
&aom_idct4x4_1_add_c, TX_4X4, 1)));
-#if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, PartialIDctTest,
::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
@@ -205,9 +205,9 @@
&aom_idct8x8_1_add_neon, TX_8X8, 1),
make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
&aom_idct4x4_1_add_neon, TX_4X4, 1)));
-#endif // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, PartialIDctTest,
::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
@@ -227,14 +227,14 @@
#endif
#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
- !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+ !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSSE3_64, PartialIDctTest,
::testing::Values(make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
&aom_idct8x8_12_add_ssse3, TX_8X8, 12)));
#endif
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
MSA, PartialIDctTest,
::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
@@ -251,6 +251,6 @@
&aom_idct8x8_1_add_msa, TX_8X8, 1),
make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
&aom_idct4x4_1_add_msa, TX_4X4, 1)));
-#endif // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/quantize_test.cc b/test/quantize_test.cc
index 29bd48d..0418509 100644
--- a/test/quantize_test.cc
+++ b/test/quantize_test.cc
@@ -29,7 +29,7 @@
namespace {
#if !CONFIG_AOM_QM
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const int number_of_iterations = 100;
typedef void (*QuantizeFunc)(const tran_low_t *coeff, intptr_t count,
@@ -100,7 +100,7 @@
const int skip_block = i == 0;
const TX_SIZE sz = (TX_SIZE)(i % 3); // TX_4X4, TX_8X8 TX_16X16
const TX_TYPE tx_type = (TX_TYPE)((i >> 2) % 3);
- const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+ const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
const int count = (4 << sz) * (4 << sz); // 16, 64, 256
int err_count = 0;
*eob_ptr = rnd.Rand16();
@@ -158,7 +158,7 @@
const int skip_block = i == 0;
const TX_SIZE sz = TX_32X32;
const TX_TYPE tx_type = (TX_TYPE)(i % 4);
- const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+ const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
const int count = (4 << sz) * (4 << sz); // 1024
int err_count = 0;
*eob_ptr = rnd.Rand16();
@@ -216,7 +216,7 @@
int skip_block = i == 0;
TX_SIZE sz = (TX_SIZE)(i % 3); // TX_4X4, TX_8X8 TX_16X16
TX_TYPE tx_type = (TX_TYPE)((i >> 2) % 3);
- const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+ const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
int count = (4 << sz) * (4 << sz); // 16, 64, 256
int err_count = 0;
*eob_ptr = rnd.Rand16();
@@ -279,7 +279,7 @@
int skip_block = i == 0;
TX_SIZE sz = TX_32X32;
TX_TYPE tx_type = (TX_TYPE)(i % 4);
- const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+ const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
int count = (4 << sz) * (4 << sz); // 1024
int err_count = 0;
*eob_ptr = rnd.Rand16();
@@ -341,6 +341,6 @@
make_tuple(&aom_highbd_quantize_b_32x32_sse2,
&aom_highbd_quantize_b_32x32_c, VPX_BITS_12)));
#endif // HAVE_SSE2
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_AOM_QM
} // namespace
diff --git a/test/register_state_check.h b/test/register_state_check.h
index 1c7b855..7481e4d 100644
--- a/test/register_state_check.h
+++ b/test/register_state_check.h
@@ -96,7 +96,7 @@
} // namespace libaom_test
#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && !CONFIG_SHARED && \
- HAVE_NEON_ASM && CONFIG_VP10
+ HAVE_NEON_ASM && CONFIG_AV1
extern "C" {
// Save the d8-d15 registers into store.
diff --git a/test/resize_test.cc b/test/resize_test.cc
index 41b4a7b..8ddd08c 100644
--- a/test/resize_test.cc
+++ b/test/resize_test.cc
@@ -524,13 +524,13 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(ResizeTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeTest,
::testing::Values(::libaom_test::kRealTime));
-VP10_INSTANTIATE_TEST_CASE(ResizeInternalTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeInternalTest,
::testing::Values(::libaom_test::kOnePassBest));
-VP10_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
::testing::Values(::libaom_test::kRealTime),
::testing::Range(5, 9));
-VP10_INSTANTIATE_TEST_CASE(ResizeCspTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeCspTest,
::testing::Values(::libaom_test::kRealTime));
} // namespace
diff --git a/test/sad_test.cc b/test/sad_test.cc
index b450458..218d02c 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -91,14 +91,14 @@
source_data_ = source_data8_;
reference_data_ = reference_data8_;
second_pred_ = second_pred8_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
use_high_bit_depth_ = true;
bit_depth_ = static_cast<aom_bit_depth_t>(bd_);
source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
@@ -107,11 +107,11 @@
}
virtual uint8_t *GetReference(int block_idx) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (use_high_bit_depth_)
return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
block_idx * kDataBlockSize);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
@@ -121,21 +121,21 @@
unsigned int sad = 0;
const uint8_t *const reference8 = GetReference(block_idx);
const uint8_t *const source8 = source_data_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint16_t *const reference16 =
CONVERT_TO_SHORTPTR(GetReference(block_idx));
const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
if (!use_high_bit_depth_) {
sad += abs(source8[h * source_stride_ + w] -
reference8[h * reference_stride_ + w]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
sad += abs(source16[h * source_stride_ + w] -
reference16[h * reference_stride_ + w]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -150,12 +150,12 @@
const uint8_t *const reference8 = GetReference(block_idx);
const uint8_t *const source8 = source_data_;
const uint8_t *const second_pred8 = second_pred_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const uint16_t *const reference16 =
CONVERT_TO_SHORTPTR(GetReference(block_idx));
const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
if (!use_high_bit_depth_) {
@@ -163,13 +163,13 @@
reference8[h * reference_stride_ + w];
const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
sad += abs(source8[h * source_stride_ + w] - comp_pred);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
const int tmp = second_pred16[h * width_ + w] +
reference16[h * reference_stride_ + w];
const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
sad += abs(source16[h * source_stride_ + w] - comp_pred);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -178,17 +178,17 @@
void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
uint8_t *data8 = data;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
if (!use_high_bit_depth_) {
data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
data16[h * stride + w] = fill_constant;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -196,17 +196,17 @@
void FillRandom(uint8_t *data, int stride) {
uint8_t *data8 = data;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
if (!use_high_bit_depth_) {
data8[h * stride + w] = rnd_.Rand8();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
data16[h * stride + w] = rnd_.Rand16() & mask_;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -485,7 +485,7 @@
make_tuple(8, 4, &aom_sad8x4_c, -1),
make_tuple(4, 8, &aom_sad4x8_c, -1),
make_tuple(4, 4, &aom_sad4x4_c, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
make_tuple(64, 64, &aom_highbd_sad64x64_c, 8),
make_tuple(64, 32, &aom_highbd_sad64x32_c, 8),
make_tuple(32, 64, &aom_highbd_sad32x64_c, 8),
@@ -525,7 +525,7 @@
make_tuple(8, 4, &aom_highbd_sad8x4_c, 12),
make_tuple(4, 8, &aom_highbd_sad4x8_c, 12),
make_tuple(4, 4, &aom_highbd_sad4x4_c, 12),
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
@@ -543,7 +543,7 @@
make_tuple(8, 4, &aom_sad8x4_avg_c, -1),
make_tuple(4, 8, &aom_sad4x8_avg_c, -1),
make_tuple(4, 4, &aom_sad4x4_avg_c, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
make_tuple(64, 64, &aom_highbd_sad64x64_avg_c, 8),
make_tuple(64, 32, &aom_highbd_sad64x32_avg_c, 8),
make_tuple(32, 64, &aom_highbd_sad32x64_avg_c, 8),
@@ -583,7 +583,7 @@
make_tuple(8, 4, &aom_highbd_sad8x4_avg_c, 12),
make_tuple(4, 8, &aom_highbd_sad4x8_avg_c, 12),
make_tuple(4, 4, &aom_highbd_sad4x4_avg_c, 12),
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
@@ -601,7 +601,7 @@
make_tuple(8, 4, &aom_sad8x4x4d_c, -1),
make_tuple(4, 8, &aom_sad4x8x4d_c, -1),
make_tuple(4, 4, &aom_sad4x4x4d_c, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
make_tuple(64, 64, &aom_highbd_sad64x64x4d_c, 8),
make_tuple(64, 32, &aom_highbd_sad64x32x4d_c, 8),
make_tuple(32, 64, &aom_highbd_sad32x64x4d_c, 8),
@@ -641,7 +641,7 @@
make_tuple(8, 4, &aom_highbd_sad8x4x4d_c, 12),
make_tuple(4, 8, &aom_highbd_sad4x8x4d_c, 12),
make_tuple(4, 4, &aom_highbd_sad4x4x4d_c, 12),
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
@@ -713,7 +713,7 @@
make_tuple(8, 4, &aom_sad8x4_sse2, -1),
make_tuple(4, 8, &aom_sad4x8_sse2, -1),
make_tuple(4, 4, &aom_sad4x4_sse2, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
make_tuple(64, 64, &aom_highbd_sad64x64_sse2, 8),
make_tuple(64, 32, &aom_highbd_sad64x32_sse2, 8),
make_tuple(32, 64, &aom_highbd_sad32x64_sse2, 8),
@@ -747,7 +747,7 @@
make_tuple(8, 16, &aom_highbd_sad8x16_sse2, 12),
make_tuple(8, 8, &aom_highbd_sad8x8_sse2, 12),
make_tuple(8, 4, &aom_highbd_sad8x4_sse2, 12),
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
@@ -765,7 +765,7 @@
make_tuple(8, 4, &aom_sad8x4_avg_sse2, -1),
make_tuple(4, 8, &aom_sad4x8_avg_sse2, -1),
make_tuple(4, 4, &aom_sad4x4_avg_sse2, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
make_tuple(64, 64, &aom_highbd_sad64x64_avg_sse2, 8),
make_tuple(64, 32, &aom_highbd_sad64x32_avg_sse2, 8),
make_tuple(32, 64, &aom_highbd_sad32x64_avg_sse2, 8),
@@ -799,7 +799,7 @@
make_tuple(8, 16, &aom_highbd_sad8x16_avg_sse2, 12),
make_tuple(8, 8, &aom_highbd_sad8x8_avg_sse2, 12),
make_tuple(8, 4, &aom_highbd_sad8x4_avg_sse2, 12),
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
@@ -815,7 +815,7 @@
make_tuple(8, 16, &aom_sad8x16x4d_sse2, -1),
make_tuple(8, 8, &aom_sad8x8x4d_sse2, -1),
make_tuple(8, 4, &aom_sad8x4x4d_sse2, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
make_tuple(64, 64, &aom_highbd_sad64x64x4d_sse2, 8),
make_tuple(64, 32, &aom_highbd_sad64x32x4d_sse2, 8),
make_tuple(32, 64, &aom_highbd_sad32x64x4d_sse2, 8),
@@ -855,7 +855,7 @@
make_tuple(8, 4, &aom_highbd_sad8x4x4d_sse2, 12),
make_tuple(4, 8, &aom_highbd_sad4x8x4d_sse2, 12),
make_tuple(4, 4, &aom_highbd_sad4x4x4d_sse2, 12),
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
#endif // CONFIG_USE_X86INC
diff --git a/test/superframe_test.cc b/test/superframe_test.cc
index 40d4f6f..19ebcfd 100644
--- a/test/superframe_test.cc
+++ b/test/superframe_test.cc
@@ -37,7 +37,7 @@
SetMode(mode);
sf_count_ = 0;
sf_count_max_ = INT_MAX;
- is_vp10_style_superframe_ = syntax;
+ is_av1_style_superframe_ = syntax;
}
virtual void TearDown() { delete[] modified_buf_; }
@@ -58,7 +58,7 @@
const int frames = (marker & 0x7) + 1;
const int mag = ((marker >> 3) & 3) + 1;
const unsigned int index_sz =
- 2 + mag * (frames - is_vp10_style_superframe_);
+ 2 + mag * (frames - is_av1_style_superframe_);
if ((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
buffer[pkt->data.frame.sz - index_sz] == marker) {
// frame is a superframe. strip off the index.
@@ -80,7 +80,7 @@
return pkt;
}
- int is_vp10_style_superframe_;
+ int is_av1_style_superframe_;
int sf_count_;
int sf_count_max_;
aom_codec_cx_pkt_t modified_pkt_;
@@ -98,7 +98,7 @@
EXPECT_EQ(sf_count_, 1);
}
-VP10_INSTANTIATE_TEST_CASE(
+AV1_INSTANTIATE_TEST_CASE(
SuperframeTest,
::testing::Combine(::testing::Values(::libaom_test::kTwoPassGood),
::testing::Values(CONFIG_MISC_FIXES)));
diff --git a/test/test-data.mk b/test/test-data.mk
index 9faa4f7..7b174fb 100644
--- a/test/test-data.mk
+++ b/test/test-data.mk
@@ -18,10 +18,10 @@
LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_444.y4m
LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_440.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += desktop_credits.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_1280_720_30.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += rush_hour_444.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += screendata.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += desktop_credits.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += niklas_1280_720_30.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += rush_hour_444.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += screendata.y4m
# sort and remove duplicates
LIBAOM_TEST_DATA-yes := $(sort $(LIBAOM_TEST_DATA-yes))
diff --git a/test/test.mk b/test/test.mk
index a0843cb..b23c3ce 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -26,14 +26,14 @@
LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS) += y4m_video_source.h
LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS) += yuv_video_source.h
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += active_map_refresh_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += active_map_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += borders_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += cpu_speed_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += frame_size_tests.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += lossless_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += end_to_end_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += ethread_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += active_map_refresh_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += active_map_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += borders_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += cpu_speed_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += frame_size_tests.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += lossless_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += end_to_end_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += ethread_test.cc
LIBAOM_TEST_SRCS-yes += decode_test_driver.cc
LIBAOM_TEST_SRCS-yes += decode_test_driver.h
@@ -63,13 +63,13 @@
# Currently we only support decoder perf tests for vp9. Also they read from WebM
# files, so WebM IO is required.
-ifeq ($(CONFIG_DECODE_PERF_TESTS)$(CONFIG_VP10_DECODER)$(CONFIG_WEBM_IO), \
+ifeq ($(CONFIG_DECODE_PERF_TESTS)$(CONFIG_AV1_DECODER)$(CONFIG_WEBM_IO), \
yesyesyes)
LIBAOM_TEST_SRCS-yes += decode_perf_test.cc
endif
# encode perf tests are vp9 only
-ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_VP10_ENCODER), yesyes)
+ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_AV1_ENCODER), yesyes)
LIBAOM_TEST_SRCS-yes += encode_perf_test.cc
endif
@@ -81,11 +81,11 @@
##
ifeq ($(CONFIG_SHARED),)
-## VP10
-ifeq ($(CONFIG_VP10),yes)
+## AV1
+ifeq ($(CONFIG_AV1),yes)
# These tests require both the encoder and decoder to be built.
-ifeq ($(CONFIG_VP10_ENCODER)$(CONFIG_VP10_DECODER),yesyes)
+ifeq ($(CONFIG_AV1_ENCODER)$(CONFIG_AV1_DECODER),yesyes)
# IDCT test currently depends on FDCT function
LIBAOM_TEST_SRCS-yes += idct8x8_test.cc
LIBAOM_TEST_SRCS-yes += partial_idct_test.cc
@@ -99,31 +99,31 @@
LIBAOM_TEST_SRCS-yes += convolve_test.cc
LIBAOM_TEST_SRCS-yes += lpf_8_test.cc
LIBAOM_TEST_SRCS-yes += intrapred_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += dct16x16_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += dct32x32_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += fdct4x4_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += fdct8x8_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += variance_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += quantize_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += subtract_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct16x16_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct32x32_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fdct4x4_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fdct8x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += variance_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += quantize_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += subtract_test.cc
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
LIBAOM_TEST_SRCS-$(CONFIG_SPATIAL_SVC) += svc_test.cc
endif
-ifeq ($(CONFIG_VP10_ENCODER)$(CONFIG_VP10_TEMPORAL_DENOISING),yesyes)
+ifeq ($(CONFIG_AV1_ENCODER)$(CONFIG_AV1_TEMPORAL_DENOISING),yesyes)
LIBAOM_TEST_SRCS-$(HAVE_SSE2) += denoiser_sse2_test.cc
endif
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += arf_freq_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += arf_freq_test.cc
LIBAOM_TEST_SRCS-yes += av1_inv_txfm_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += av1_dct_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_dct_test.cc
-endif # VP10
+endif # AV1
## Multi-codec / unconditional whitebox tests.
-ifeq ($(findstring yes,$(CONFIG_VP10_ENCODER)$(CONFIG_VP10_ENCODER)),yes)
+ifeq ($(findstring yes,$(CONFIG_AV1_ENCODER)$(CONFIG_AV1_ENCODER)),yes)
LIBAOM_TEST_SRCS-yes += avg_test.cc
endif
diff --git a/test/test_libaom.cc b/test/test_libaom.cc
index cfbb188..c8ea59b 100644
--- a/test/test_libaom.cc
+++ b/test/test_libaom.cc
@@ -16,9 +16,9 @@
#include "aom_ports/x86.h"
#endif
extern "C" {
-#if CONFIG_VP10
+#if CONFIG_AV1
extern void av1_rtcd();
-#endif // CONFIG_VP10
+#endif // CONFIG_AV1
extern void aom_dsp_rtcd();
extern void aom_scale_rtcd();
}
@@ -54,9 +54,9 @@
// Shared library builds don't support whitebox tests
// that exercise internal symbols.
-#if CONFIG_VP10
+#if CONFIG_AV1
av1_rtcd();
-#endif // CONFIG_VP10
+#endif // CONFIG_AV1
aom_dsp_rtcd();
aom_scale_rtcd();
#endif // !CONFIG_SHARED
diff --git a/test/tile_independence_test.cc b/test/tile_independence_test.cc
index b5d929f..dad2dcd 100644
--- a/test/tile_independence_test.cc
+++ b/test/tile_independence_test.cc
@@ -100,6 +100,6 @@
ASSERT_STREQ(md5_fw_str, md5_inv_str);
}
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
+AV1_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
} // namespace
diff --git a/test/tools_common.sh b/test/tools_common.sh
index e79df8e..b931e34 100755
--- a/test/tools_common.sh
+++ b/test/tools_common.sh
@@ -173,15 +173,15 @@
}
# Echoes yes to stdout when aom_config_option_enabled() reports yes for
-# CONFIG_VP10_DECODER.
-vp10_decode_available() {
- [ "$(aom_config_option_enabled CONFIG_VP10_DECODER)" = "yes" ] && echo yes
+# CONFIG_AV1_DECODER.
+av1_decode_available() {
+ [ "$(aom_config_option_enabled CONFIG_AV1_DECODER)" = "yes" ] && echo yes
}
# Echoes yes to stdout when aom_config_option_enabled() reports yes for
-# CONFIG_VP10_ENCODER.
-vp10_encode_available() {
- [ "$(aom_config_option_enabled CONFIG_VP10_ENCODER)" = "yes" ] && echo yes
+# CONFIG_AV1_ENCODER.
+av1_encode_available() {
+ [ "$(aom_config_option_enabled CONFIG_AV1_ENCODER)" = "yes" ] && echo yes
}
# CONFIG_WEBM_IO.
webm_io_available() {
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 02c061a..d8da018 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -89,13 +89,13 @@
src[w * y * src_stride_coeff + x];
se += diff;
sse += diff * diff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
se += diff;
sse += diff * diff;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -137,7 +137,7 @@
const int diff = r - src[w * y + x];
se += diff;
sse += diff * diff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
@@ -151,7 +151,7 @@
const int diff = r - src16[w * y + x];
se += diff;
sse += diff * diff;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -226,12 +226,12 @@
if (!use_high_bit_depth_) {
src_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_ * 2));
ref_ = new uint8_t[block_size_ * 2];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
aom_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
@@ -241,11 +241,11 @@
if (!use_high_bit_depth_) {
aom_free(src_);
delete[] ref_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
aom_free(CONVERT_TO_SHORTPTR(src_));
delete[] CONVERT_TO_SHORTPTR(ref_);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
libaom_test::ClearSystemState();
}
@@ -273,20 +273,20 @@
for (int i = 0; i <= 255; ++i) {
if (!use_high_bit_depth_) {
memset(src_, i, block_size_);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
aom_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
block_size_);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
for (int j = 0; j <= 255; ++j) {
if (!use_high_bit_depth_) {
memset(ref_, j, block_size_);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
aom_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
block_size_);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
unsigned int sse;
unsigned int var;
@@ -304,11 +304,11 @@
if (!use_high_bit_depth_) {
src_[j] = rnd_.Rand8();
ref_[j] = rnd_.Rand8();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() && mask_;
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() && mask_;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
unsigned int sse1, sse2;
@@ -335,11 +335,11 @@
if (!use_high_bit_depth_) {
src_[src_ind] = rnd_.Rand8();
ref_[ref_ind] = rnd_.Rand8();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() && mask_;
CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() && mask_;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
unsigned int sse1, sse2;
@@ -363,13 +363,13 @@
memset(src_, 255, block_size_);
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
aom_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
block_size_);
aom_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
aom_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
unsigned int sse;
unsigned int var;
@@ -501,7 +501,7 @@
((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
se += diff;
sse += diff * diff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
@@ -516,7 +516,7 @@
const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
se += diff;
sse += diff * diff;
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
}
}
@@ -554,7 +554,7 @@
src_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_));
sec_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_));
ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
aom_memalign(16, block_size_ * sizeof(uint16_t))));
@@ -562,7 +562,7 @@
aom_memalign(16, block_size_ * sizeof(uint16_t))));
ref_ =
CONVERT_TO_BYTEPTR(new uint16_t[block_size_ + width_ + height_ + 1]);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(sec_ != NULL);
@@ -574,12 +574,12 @@
aom_free(src_);
delete[] ref_;
aom_free(sec_);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
aom_free(CONVERT_TO_SHORTPTR(src_));
delete[] CONVERT_TO_SHORTPTR(ref_);
aom_free(CONVERT_TO_SHORTPTR(sec_));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
libaom_test::ClearSystemState();
}
@@ -611,7 +611,7 @@
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd_.Rand8();
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
for (int j = 0; j < block_size_; j++) {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
@@ -619,7 +619,7 @@
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
@@ -647,14 +647,14 @@
memset(src_ + half, 255, half);
memset(ref_, 255, half);
memset(ref_ + half, 0, half + width_ + height_ + 1);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
aom_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
aom_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
aom_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
aom_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
half + width_ + height_ + 1);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
@@ -681,7 +681,7 @@
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd_.Rand8();
}
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
} else {
for (int j = 0; j < block_size_; j++) {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
@@ -690,7 +690,7 @@
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
@@ -787,7 +787,7 @@
make_tuple(2, 3, &aom_sub_pixel_avg_variance4x8_c, 0),
make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_c, 0)));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
@@ -947,7 +947,7 @@
make_tuple(3, 2, &aom_highbd_12_sub_pixel_avg_variance8x4_c, 12),
make_tuple(2, 3, &aom_highbd_12_sub_pixel_avg_variance4x8_c, 12),
make_tuple(2, 2, &aom_highbd_12_sub_pixel_avg_variance4x4_c, 12)));
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if HAVE_MMX
INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
@@ -1034,7 +1034,7 @@
make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_sse, 0)));
#endif // CONFIG_USE_X86INC
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
/* TODO(debargha): This test does not support the highbd version
INSTANTIATE_TEST_CASE_P(
SSE2, VpxHBDMseTest,
@@ -1160,7 +1160,7 @@
make_tuple(3, 3, &aom_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
make_tuple(3, 2, &aom_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // HAVE_SSE2
#if HAVE_SSSE3
diff --git a/third_party/libwebm/mkvmuxer.cpp b/third_party/libwebm/mkvmuxer.cpp
index 9be3119..bf19f04 100644
--- a/third_party/libwebm/mkvmuxer.cpp
+++ b/third_party/libwebm/mkvmuxer.cpp
@@ -1045,7 +1045,7 @@
const char Tracks::kVorbisCodecId[] = "A_VORBIS";
const char Tracks::kVp8CodecId[] = "V_VP8";
const char Tracks::kVp9CodecId[] = "V_VP9";
-const char Tracks::kVp10CodecId[] = "V_VP10";
+const char Tracks::kAV1CodecId[] = "V_AV1";
Tracks::Tracks() : track_entries_(NULL), track_entries_size_(0) {}
diff --git a/third_party/libwebm/mkvmuxer.hpp b/third_party/libwebm/mkvmuxer.hpp
index 03a002c..27e0a3d 100644
--- a/third_party/libwebm/mkvmuxer.hpp
+++ b/third_party/libwebm/mkvmuxer.hpp
@@ -533,7 +533,7 @@
static const char kVorbisCodecId[];
static const char kVp8CodecId[];
static const char kVp9CodecId[];
- static const char kVp10CodecId[];
+ static const char kAV1CodecId[];
Tracks();
~Tracks();
diff --git a/tools_common.c b/tools_common.c
index dcec1ab..8d2c764 100644
--- a/tools_common.c
+++ b/tools_common.c
@@ -17,11 +17,11 @@
#include "./tools_common.h"
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
#include "aom/vp8cx.h"
#endif
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
#include "aom/vp8dx.h"
#endif
@@ -131,8 +131,8 @@
#if CONFIG_ENCODERS
static const VpxInterface aom_encoders[] = {
-#if CONFIG_VP10_ENCODER
- { "vp10", VP10_FOURCC, &aom_codec_vp10_cx },
+#if CONFIG_AV1_ENCODER
+ { "av1", AV1_FOURCC, &aom_codec_av1_cx },
#endif
};
@@ -158,8 +158,8 @@
#if CONFIG_DECODERS
static const VpxInterface aom_decoders[] = {
-#if CONFIG_VP10_DECODER
- { "vp10", VP10_FOURCC, &aom_codec_vp10_dx },
+#if CONFIG_AV1_DECODER
+ { "av1", AV1_FOURCC, &aom_codec_av1_dx },
#endif
};
@@ -260,7 +260,7 @@
}
// TODO(debargha): Consolidate the functions below into a separate file.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_img_upshift(aom_image_t *dst, aom_image_t *src,
int input_shift) {
// Note the offset is 1 less than half.
@@ -451,4 +451,4 @@
lowbd_img_downshift(dst, src, down_shift);
}
}
-#endif // CONFIG_VPX_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/tools_common.h b/tools_common.h
index 4e7ef07..1fd39d1 100644
--- a/tools_common.h
+++ b/tools_common.h
@@ -63,7 +63,7 @@
#define VP8_FOURCC 0x30385056
#define VP9_FOURCC 0x30395056
-#define VP10_FOURCC 0x303a5056
+#define AV1_FOURCC 0x303a5056
enum VideoFileType {
FILE_TYPE_RAW,
@@ -152,7 +152,7 @@
double sse_to_psnr(double samples, double peak, double mse);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void aom_img_upshift(aom_image_t *dst, aom_image_t *src, int input_shift);
void aom_img_downshift(aom_image_t *dst, aom_image_t *src, int down_shift);
void aom_img_truncate_16_to_8(aom_image_t *dst, aom_image_t *src);
diff --git a/webmdec.cc b/webmdec.cc
index 87c29a4..54fe081 100644
--- a/webmdec.cc
+++ b/webmdec.cc
@@ -103,8 +103,8 @@
aom_ctx->fourcc = VP8_FOURCC;
} else if (!strncmp(video_track->GetCodecId(), "V_VP9", 5)) {
aom_ctx->fourcc = VP9_FOURCC;
- } else if (!strncmp(video_track->GetCodecId(), "V_VP10", 6)) {
- aom_ctx->fourcc = VP10_FOURCC;
+ } else if (!strncmp(video_track->GetCodecId(), "V_AV1", 5)) {
+ aom_ctx->fourcc = AV1_FOURCC;
} else {
rewind_and_reset(webm_ctx, aom_ctx);
return 0;
diff --git a/webmenc.cc b/webmenc.cc
index 3b475bb..75cceeb 100644
--- a/webmenc.cc
+++ b/webmenc.cc
@@ -50,8 +50,8 @@
switch (fourcc) {
case VP8_FOURCC: codec_id = "V_VP8"; break;
case VP9_FOURCC: codec_id = "V_VP9"; break;
- case VP10_FOURCC: codec_id = "V_VP10"; break;
- default: codec_id = "V_VP10"; break;
+ case AV1_FOURCC: codec_id = "V_AV1"; break;
+ default: codec_id = "V_AV1"; break;
}
video_track->set_codec_id(codec_id);
if (par->numerator > 1 || par->denominator > 1) {